| blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1-1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8cfcba3e20c4b93a0fe4a250a491f169270afabe
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/FrequencyRuleDetail.py
|
b846e57f4e1d54ec0a6cc953f0a0723b7ba9ddfc
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,587
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class FrequencyRuleDetail(object):
def __init__(self):
self._frequency_duration = None
self._frequency_type = None
self._total_times = None
self._used_times = None
@property
def frequency_duration(self):
return self._frequency_duration
@frequency_duration.setter
def frequency_duration(self, value):
self._frequency_duration = value
@property
def frequency_type(self):
return self._frequency_type
@frequency_type.setter
def frequency_type(self, value):
self._frequency_type = value
@property
def total_times(self):
return self._total_times
@total_times.setter
def total_times(self, value):
self._total_times = value
@property
def used_times(self):
return self._used_times
@used_times.setter
def used_times(self, value):
self._used_times = value
def to_alipay_dict(self):
params = dict()
if self.frequency_duration:
if hasattr(self.frequency_duration, 'to_alipay_dict'):
params['frequency_duration'] = self.frequency_duration.to_alipay_dict()
else:
params['frequency_duration'] = self.frequency_duration
if self.frequency_type:
if hasattr(self.frequency_type, 'to_alipay_dict'):
params['frequency_type'] = self.frequency_type.to_alipay_dict()
else:
params['frequency_type'] = self.frequency_type
if self.total_times:
if hasattr(self.total_times, 'to_alipay_dict'):
params['total_times'] = self.total_times.to_alipay_dict()
else:
params['total_times'] = self.total_times
if self.used_times:
if hasattr(self.used_times, 'to_alipay_dict'):
params['used_times'] = self.used_times.to_alipay_dict()
else:
params['used_times'] = self.used_times
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = FrequencyRuleDetail()
if 'frequency_duration' in d:
o.frequency_duration = d['frequency_duration']
if 'frequency_type' in d:
o.frequency_type = d['frequency_type']
if 'total_times' in d:
o.total_times = d['total_times']
if 'used_times' in d:
o.used_times = d['used_times']
return o
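# A minimal round-trip sketch (hedged: the values below are made up for
# illustration and are not part of the SDK file):
if __name__ == '__main__':
    detail = FrequencyRuleDetail()
    detail.frequency_type = "DAY"
    detail.total_times = 10
    detail.used_times = 3
    d = detail.to_alipay_dict()  # {'frequency_type': 'DAY', 'total_times': 10, 'used_times': 3}
    clone = FrequencyRuleDetail.from_alipay_dict(d)
    assert clone.used_times == 3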
|
[
"jiandong.jd@antfin.com"
] |
jiandong.jd@antfin.com
|
ae6fc7d3c35d5c0d5f99d2a00ec6329685a66892
|
e6a17c35ca3c139909d38856ebffcf99b18bff1d
|
/news_feed/newsfeed/newsfeed/pipelines.py
|
9ec4f06e844038c899bca720bfdd9a9b83cad23b
|
[] |
no_license
|
kingomalek/web-scraping
|
cbe29b6c0fc15e987879986ac5ecd867ec959677
|
00d79d7972b350cb38d53cd43ea2a9ee8c20a0fe
|
refs/heads/master
| 2020-03-28T21:38:43.291440
| 2018-09-17T18:44:20
| 2018-09-17T18:44:20
| 149,117,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class NewsfeedPipeline(object):
def process_item(self, item, spider):
return item
|
[
"kingo.malek@gmail.com"
] |
kingo.malek@gmail.com
|
06a154a6ff2cdb789d37bab35001675a05b875c4
|
cb995863a3e9b3a676556820871de8dd0c54db3a
|
/tkinter.py
|
7deb8354fa65094a15edcfdf41c6370c3b865f09
|
[] |
no_license
|
J4rn3s/Assignments
|
c46c0e5066a84f2a0c0667f041a03e8dfca59cd8
|
7372464cfe50517dc8894e31fe40a6dc6eef873e
|
refs/heads/main
| 2023-05-30T17:40:51.478873
| 2021-06-19T12:32:15
| 2021-06-19T12:32:15
| 371,911,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
from tkinter import *

app = Tk()
app.title("TVN Game Show")
app.geometry('300x100+200+100')
b1 = Button(app, text = "Correct!", width = 10)
b1.pack(side = 'left', padx = 10, pady = 10)
b2 = Button(app, text = "Wrong!", width = 10)
b2.pack(side = 'right', padx = 10, pady = 10)
app.mainloop()
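# Hedged next step: the two buttons above have no handlers yet; wiring one up
# would look like this (hypothetical callback, attached before app.mainloop()):
# def on_correct():
#     print("Correct answer!")
# b1.config(command=on_correct)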
|
[
"noreply@github.com"
] |
J4rn3s.noreply@github.com
|
297a8f68eacdc7a9634aa7d5be0d8284bf7bb2c9
|
c08153289b66cd2c358d84311675c6eeb54a40c1
|
/sort/mergeSort.py
|
f2e36732d40b3080b3ab98bfe08afb09a76a16b0
|
[] |
no_license
|
wllps1988315/search
|
3496048eade03bf38d2e178919d68579bcf5e800
|
474fc728fa0aeae35c90140b7bf4055c5dd492d7
|
refs/heads/master
| 2021-05-12T01:18:34.130863
| 2018-05-02T12:58:10
| 2018-05-02T12:58:10
| 117,556,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
# -*- coding:utf-8 -*-
def mergeSort(alist):
print("Splitting ",alist)
if len(alist) > 1:
mid = len(alist) // 2
lefthalf = alist[:mid]
righthalf = alist[mid:]
mergeSort(lefthalf)
mergeSort(righthalf)
i=0
j=0
k=0
while i < len(lefthalf) and j < len(righthalf):
if lefthalf[i] < righthalf[j]:
alist[k] = lefthalf[i]
i = i + 1
else:
alist[k]=righthalf[j]
j=j+1
k=k+1
while i < len(lefthalf):
alist[k]=lefthalf[i]
i=i+1
k=k+1
while j < len(righthalf):
alist[k]=righthalf[j]
j=j+1
k=k+1
print("Merging ",alist)
alist = [54,26,93,17,77,31,44,55,20]
mergeSort(alist)
print(alist)
|
[
"wang_lan@gowild.cn"
] |
wang_lan@gowild.cn
|
4a0b34d2f8e40e95a1eab41133933cb56377886a
|
7f92c2fc131ca637d8b7c2a4dbba4b974884e786
|
/lab3/scripts/experiment1_data1.py
|
34d428b8ea9f437b4dfa13b2d7434f8402e13770
|
[] |
no_license
|
byronwasti/CircuitsLabs
|
2c5694f07a59adedddde361d0a85a690a83e096b
|
be1227c504ed1a2b81b6d670cbaa45d4b8be8e17
|
refs/heads/master
| 2020-05-23T11:15:14.853587
| 2017-09-03T18:53:50
| 2017-09-03T18:53:50
| 80,369,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,188
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import csv
TAKE_NEW_DATA = False
FILENAME = "data/experiment1_data2_3.csv"
if TAKE_NEW_DATA:
import smu
s = smu.smu()
f = open(FILENAME, "wb")
writer = csv.writer(f)
v_in = np.linspace(0.35, 0.9, 255)
i_b = []
i_e = []
s.set_voltage(2, 0.)
for v in v_in:
s.set_voltage(1, v)
s.autorange(1)
i_b.append(s.get_current(1))
i_e.append(-s.get_current(2))
s.set_voltage(1, 0.)
data = zip(v_in, i_b, i_e)
writer.writerow(["V_in(Ch1)", "I_b(Ch1)", "I_e(Ch2)"])
writer.writerows(data)
f.close()
x = v_in
y1 = i_b
y2 = i_e
if not TAKE_NEW_DATA:
with open(FILENAME, 'r') as f:
reader = csv.reader(f)
x = []
y1 = []
y2 = []
for i, row in enumerate(reader):
            if i == 0: continue  # skip the header row
            x.append(float(row[0]))  # csv returns strings; convert for plotting
            y1.append(float(row[1]))
            y2.append(float(row[2]))
if True:
plt.plot(x, y1, '.', label="i_b")
plt.figure()
plt.plot(x, y2, '.', label="i_e")
plt.legend()
plt.xlabel("Voltage")
plt.ylabel("Current")
plt.show()
|
[
"byron.wasti@gmail.com"
] |
byron.wasti@gmail.com
|
ddb880615853b09b4f915137cf0ffae2c24d6d4a
|
b57f1b57a7c97e049807dd7fc2468a06fd0ab1b5
|
/pdfimport/settings.py
|
5a759bc489d8e1174e7d2e15f9f57769d56dce1e
|
[] |
no_license
|
vsjakhar/PDFImportDjango
|
930f176737d3789f0e3ef6b3c566e47894b60d88
|
7a363d5db0f2aacd5adde4683f9977e84a487af3
|
refs/heads/master
| 2022-12-21T14:18:41.406788
| 2020-02-18T13:54:54
| 2020-02-18T13:54:54
| 241,169,539
| 0
| 0
| null | 2022-12-08T03:38:13
| 2020-02-17T17:38:57
|
Python
|
UTF-8
|
Python
| false
| false
| 3,297
|
py
|
"""
Django settings for pdfimport project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^k+5+0a4k9prdr%v809&x8v^uw7h@50kl!yw^yib!u4&$ni7-_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pdfimport.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pdfimport.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
DATA_UPLOAD_MAX_MEMORY_SIZE = 26214400
AUTH_USER_MODEL = 'home.User'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static/')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
|
[
"vijayjakhar.vj@gmail.com"
] |
vijayjakhar.vj@gmail.com
|
d9eacd98ea6425409d95aa56128e592e61371963
|
eb7af309163957e5f0e96c56f4b864f68dffe854
|
/charpter17/03_function_3.py
|
08add8e16e206f8d4eaad3185cbd2603a53e17f9
|
[] |
no_license
|
chiminwon/Python365
|
302aaaa0b47b05afaad35b92f444eadf93d9d63b
|
7efc82b7e77e60cd4495888c2c65505395ee4a17
|
refs/heads/master
| 2023-01-19T11:58:40.702360
| 2020-11-20T11:59:33
| 2020-11-20T11:59:33
| 299,821,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
def change(b):
print(id(b))
    # b points to the same object that was passed in
b = 10
print(id(b))
    # after b = 10, b is bound to a new object
a = 1
print(id(a))
change(a)
print(id(a))
|
[
"chiminwon@gmail.com"
] |
chiminwon@gmail.com
|
7554b628c55916d907873d94795fa04924df70e3
|
0cf4ddd70a3184098d8c5bbc7cef010233670ef8
|
/practica28.py
|
2e7167e85a2b77ca51a47241af19b883eb058648
|
[] |
no_license
|
Zerobitss/Python-101-ejercicios-proyectos
|
aadecfbd5306f5fedee389ae5262d5c42507ed51
|
4b51778cddf8a476c1f472e1ad0b05b17b7b2461
|
refs/heads/master
| 2023-02-27T12:07:24.460312
| 2021-02-13T21:48:09
| 2021-02-13T21:48:09
| 338,676,192
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
"""
Write a program that stores the password string in a variable and keeps asking
the user for the password until the correct password is entered.
"""
def run():
    password = str(input("Enter password: "))
    re_password = str(input("Repeat the password: "))
    while password != re_password:
        print("Error: the passwords do not match")
        password = str(input("Enter the password again: "))
        re_password = str(input("Repeat the password: "))
        if password == re_password:
            print(f"Congratulations, both passwords match: {password}")
break
if __name__ == '__main__':
run()
|
[
"game4droidplay@gmail.com"
] |
game4droidplay@gmail.com
|
c7e0bb3087d8b71a2084ee14c60cbb3c3496faa8
|
38a3f9b2e71636bef436e4edfdbb454901644219
|
/pytext/models/pair_classification_model.py
|
7bd8a1bbedd8dfe08d9df1569c31f87a008121eb
|
[
"BSD-3-Clause"
] |
permissive
|
dimalik/pytext
|
917e35f973fd9fec7e8ae1cc476b42a2affd0511
|
1e4dd71b1a42350ba8a31123aa8d9d753ec7f7ce
|
refs/heads/master
| 2020-07-25T23:36:06.110597
| 2019-09-14T02:58:12
| 2019-09-14T02:59:57
| 208,457,838
| 0
| 0
|
NOASSERTION
| 2019-09-14T15:12:57
| 2019-09-14T15:12:57
| null |
UTF-8
|
Python
| false
| false
| 11,800
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import os
from typing import Dict, List, Tuple, Union
import torch
import torch.nn as nn
from pytext.data.tensorizers import LabelTensorizer, Tensorizer, TokenTensorizer
from pytext.models.decoders import DecoderBase
from pytext.models.decoders.mlp_decoder import MLPDecoder
from pytext.models.embeddings import EmbeddingBase, EmbeddingList, WordEmbedding
from pytext.models.model import BaseModel
from pytext.models.module import create_module
from pytext.models.output_layers import ClassificationOutputLayer, OutputLayerBase
from pytext.models.representations.bilstm_doc_attention import BiLSTMDocAttention
from pytext.models.representations.docnn import DocNNRepresentation
from pytext.models.representations.representation_base import RepresentationBase
from scipy.special import comb
class BasePairwiseModel(BaseModel):
"""
A base classification model that scores a pair of texts.
Subclasses need to implement the from_config, forward and save_modules.
"""
__EXPANSIBLE__ = True
class Config(BaseModel.Config):
decoder: MLPDecoder.Config = MLPDecoder.Config()
output_layer: ClassificationOutputLayer.Config = (
ClassificationOutputLayer.Config()
)
encode_relations: bool = True
def __init__(
self,
decoder: DecoderBase,
output_layer: OutputLayerBase,
encode_relations: bool,
) -> None:
super().__init__()
self.decoder = decoder
self.output_layer = output_layer
self.encode_relations = encode_relations
@classmethod
def from_config(cls, config: Config, tensorizers: Dict[str, Tensorizer]):
raise NotImplementedError
def forward(
self, input1: Tuple[torch.Tensor, ...], input2: Tuple[torch.Tensor, ...]
):
raise NotImplementedError
def save_modules(self, base_path: str = "", suffix: str = ""):
raise NotImplementedError
@classmethod
def _create_decoder(
cls,
config: Config,
representations: nn.ModuleList,
tensorizers: Dict[str, Tensorizer],
):
labels = tensorizers["labels"].vocab
num_reps = len(representations)
rep_dim = representations[0].representation_dim
decoder_in_dim = num_reps * rep_dim
if config.encode_relations:
decoder_in_dim += 2 * comb(num_reps, 2, exact=True) * rep_dim
decoder = create_module(
config.decoder, in_dim=decoder_in_dim, out_dim=len(labels)
)
return decoder
@classmethod
def _encode_relations(cls, encodings: List[torch.Tensor]) -> List[torch.Tensor]:
for rep_l, rep_r in itertools.combinations(encodings, 2):
encodings.append(torch.abs(rep_l - rep_r))
encodings.append(rep_l * rep_r)
return encodings
def _save_modules(self, modules: nn.ModuleList, base_path: str, suffix: str):
super().save_modules(base_path, suffix)
# Special case to also save the multi-representations separately, if needed.
for module in modules:
if getattr(module.config, "save_path", None):
path = module.config.save_path + suffix
if base_path:
path = os.path.join(base_path, path)
print(
f"Saving state of module {type(module).__name__} " f"to {path} ..."
)
torch.save(module.state_dict(), path)
class PairwiseModel(BasePairwiseModel):
"""
A classification model that scores a pair of texts, for example, a model for
natural language inference.
The model shares embedding space (so it doesn't support
pairs of texts where left and right are in different languages). It uses
bidirectional LSTM or CNN to represent the two documents, and concatenates
them along with their absolute difference and elementwise product. This
concatenated pair representation is passed to a multi-layer perceptron to
decode to label/target space.
See https://arxiv.org/pdf/1705.02364.pdf for more details.
It can be instantiated just like any other :class:`~Model`.
"""
EMBEDDINGS = ["embedding"]
INPUTS_PAIR = [["tokens1"], ["tokens2"]]
class Config(BasePairwiseModel.Config):
"""
Attributes:
encode_relations (bool): if `false`, return the concatenation of the two
representations; if `true`, also concatenate their pairwise absolute
difference and pairwise elementwise product (à la arXiv:1705.02364).
Default: `true`.
tied_representation: whether to use the same representation, with
tied weights, for all the input subrepresentations. Default: `true`.
"""
class ModelInput(BasePairwiseModel.Config.ModelInput):
tokens1: TokenTensorizer.Config = TokenTensorizer.Config(column="text1")
tokens2: TokenTensorizer.Config = TokenTensorizer.Config(column="text2")
labels: LabelTensorizer.Config = LabelTensorizer.Config()
inputs: ModelInput = ModelInput()
embedding: WordEmbedding.Config = WordEmbedding.Config()
representation: Union[
BiLSTMDocAttention.Config, DocNNRepresentation.Config
] = BiLSTMDocAttention.Config()
shared_representations: bool = True
def __init__(
self,
embeddings: nn.ModuleList,
representations: nn.ModuleList,
decoder: MLPDecoder,
output_layer: ClassificationOutputLayer,
encode_relations: bool,
) -> None:
super().__init__(decoder, output_layer, encode_relations)
self.embeddings = embeddings
self.representations = representations
# from_config and helper function
@classmethod
def _create_embedding(cls, config, tensorizer) -> EmbeddingBase:
return create_module(config, None, tensorizer)
@classmethod
def _create_embeddings(
cls, config: Config, tensorizers: Dict[str, Tensorizer]
) -> nn.ModuleList:
embeddings = []
for inputs in cls.INPUTS_PAIR:
embedding_list = []
for emb, input in zip(cls.EMBEDDINGS, inputs):
if hasattr(config, emb) and input in tensorizers:
embedding_list.append(
cls._create_embedding(getattr(config, emb), tensorizers[input])
)
if len(embedding_list) == 1:
embeddings.append(embedding_list[0])
else:
embeddings.append(EmbeddingList(embeddings=embedding_list, concat=True))
return nn.ModuleList(embeddings)
@classmethod
def _create_representations(cls, config: Config, embeddings: nn.ModuleList):
if config.shared_representations:
            # create the representation once and use it for all embeddings
embedding_dim = embeddings[0].embedding_dim
representations = nn.ModuleList(
itertools.repeat(
create_module(config.representation, embed_dim=embedding_dim),
len(embeddings),
)
)
else:
representations = nn.ModuleList(
[
create_module(
config.representation, embed_dim=embedding.embedding_dim
)
for embedding in embeddings
]
)
return representations
@classmethod
def from_config(cls, config: Config, tensorizers: Dict[str, Tensorizer]):
embeddings = cls._create_embeddings(config, tensorizers)
representations = cls._create_representations(config, embeddings)
decoder = cls._create_decoder(config, representations, tensorizers)
output_layer = create_module(
config.output_layer, labels=tensorizers["labels"].vocab
)
return cls(
embeddings, representations, decoder, output_layer, config.encode_relations
)
def arrange_model_inputs(self, tensor_dict):
return tensor_dict["tokens1"][:2], tensor_dict["tokens2"][:2]
def arrange_targets(self, tensor_dict):
return tensor_dict["labels"]
# _encode and helper functions
@classmethod
def _represent_helper(
cls, rep: RepresentationBase, embs: torch.Tensor, lens: torch.Tensor
) -> torch.Tensor:
representation = rep(embs, lens)
if isinstance(representation, tuple):
return representation[0]
return representation
@classmethod
def _represent_sort(
cls,
embeddings: List[torch.Tensor],
lengths: List[torch.Tensor],
represention_modules: nn.ModuleList,
) -> List[torch.Tensor]:
"""
Apply the representations computations in `self.representations` to the
sentence representations in `embeddings`.
Internally, it sorts the sentences in `embeddings` by the number
of tokens for packing efficiency, where the number of tokens is in `lengths`,
and undoes the sort after applying the representations to preserve the
original ordering of sentences. Assumes that the leftmost sentences are
already sorted by number of tokens.
"""
if isinstance(represention_modules[0], BiLSTMDocAttention):
# The leftmost inputs already come sorted by length. The others need to
# be sorted as well, for packing. We do it manually.
sorted_inputs = [(embeddings[0], lengths[0])]
sorted_indices = [None]
for embs, lens in zip(embeddings[1:], lengths[1:]):
lens_sorted, sorted_idx = lens.sort(descending=True)
embs_sorted = embs[sorted_idx]
sorted_inputs.append((embs_sorted, lens_sorted))
sorted_indices.append(sorted_idx)
representations = [
cls._represent_helper(rep, embs, lens)
for rep, (embs, lens) in zip(represention_modules, sorted_inputs)
]
# Put the inputs back in the original order, so they still match up to
# each other as well as the targets.
unsorted_representations = [representations[0]]
for sorted_idx, rep in zip(sorted_indices[1:], representations[1:]):
_, unsorted_idx = sorted_idx.sort()
unsorted_representations.append(rep[unsorted_idx])
return unsorted_representations
else:
return [
cls._represent_helper(rep, embs, lens)
for rep, (embs, lens) in zip(
represention_modules, zip(embeddings, lengths)
)
]
def _represent(self, embeddings: List[torch.Tensor], seq_lens: List[torch.Tensor]):
representations = self._represent_sort(
embeddings, seq_lens, self.representations
)
if self.encode_relations:
representations = self._encode_relations(representations)
return torch.cat(representations, -1)
def forward(
self, input1: Tuple[torch.Tensor, ...], input2: Tuple[torch.Tensor, ...]
) -> torch.Tensor:
token_tups, seq_lens = (input1[:-1], input2[:-1]), (input1[-1], input2[-1])
embeddings = [
emb(*token_tup) for emb, token_tup in zip(self.embeddings, token_tups)
]
representation = self._represent(embeddings, seq_lens)
return self.decoder(representation)
def save_modules(self, base_path: str = "", suffix: str = ""):
self._save_modules(self.representations, base_path, suffix)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
c5782c245643b838896519a5bc26ab0d1bc46851
|
1957ddd8a3dfd3b0b1f6662ab6da43e2ddcd65de
|
/2prakticheskaya/Project4.py
|
7d5c1d13577d3e1ae10d2ce7694bc8fa479adb7c
|
[] |
no_license
|
Lightm0re/PythonProjects
|
5535ae4f4aa81d7e247c41a575e329ce3d7f5adb
|
8187589b19ac99d98da63943cdf09fc5e57c5fba
|
refs/heads/master
| 2021-09-05T13:32:28.831925
| 2018-01-28T07:09:34
| 2018-01-28T07:09:34
| 107,757,412
| 0
| 0
| null | 2017-11-27T14:10:23
| 2017-10-21T06:33:51
|
Python
|
UTF-8
|
Python
| false
| false
| 947
|
py
|
'''Given a list of one athlete's attempt results in some competition,
write a function that counts how many times a new record was set during the
session, i.e. how many times the current value exceeded the previous maximum.
For example, given the list of results:
scores = [10, 5, 20, 20, 4, 5, 2, 25, 1].
In this case the answer is: 2.'''
def adds():
    scores = input("Enter the list of results:").split(' ')
    print('List of results \n', scores)
count = 0
rec = scores[0]
for ind, el in enumerate(scores):
if int(el) > int(rec):
rec = el
count += 1
    return 'Number of records:', count
print(adds())
|
[
"noreply@github.com"
] |
Lightm0re.noreply@github.com
|
a4caa5c125fb88be2064c7a27f84753a9b2403a2
|
4a46f9d06515e61ef89ef0a5bfa1393cf218e68f
|
/contrib/testgen/base58.py
|
67b715c25dd628cb736b44d720f2d808fa84248c
|
[
"MIT"
] |
permissive
|
GroinGuy/GroinCoin-GXG
|
fe886d386fef948c818b4b34c59040791da45f3b
|
d71c1b200683a77ccf797d8a500e468351da5ee0
|
refs/heads/master
| 2020-05-21T14:04:39.761147
| 2019-02-02T13:52:52
| 2019-02-02T13:52:52
| 19,191,079
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,986
|
py
|
# Copyright (c) 2012-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Groincoin base58 encoding and decoding.
Based on https://groincointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Groincoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
if result[-4:] == checksum(result[:-4]):
return result[:-4]
else:
return None
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21: return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
# Test case (from http://gitorious.org/groincoin/python-base58.git)
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
|
[
"mendozg@gmx.com"
] |
mendozg@gmx.com
|
2f3367fa233aac359784e8be07064e5bf08ef2fe
|
83d7af883df9f83e987b6401fe9a57ec73cacac0
|
/dynamic_programming/CtCI_8_5_recursive_multiply/Solution.py
|
497679635a4420698a33010d944b2332de9f5180
|
[] |
no_license
|
chopdev/leetcode_tasks
|
93f3b807e3e33a2735cb9558dbe0f1b39d7c08f9
|
89aee5e9064e1bfe9873d26836453f0c7507bbc4
|
refs/heads/master
| 2023-08-28T04:46:01.534860
| 2023-08-27T08:17:15
| 2023-08-27T08:17:15
| 135,029,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
# Recursive Multiply: Write a recursive function to multiply two positive integers without using
# the * operator (or / operator). You can use addition, subtraction, and bit shifting, but you should
# minimize the number of those operations.
# The idea is to halve the smaller number until it reaches 1;
# when it is 1, 1 * number = number
# 17 * 20
# (9 + 8) * 20
# (5 + 4 + 4 + 4) *20
# (3 + 2 + 2 + 2 + 2 + 2 + 2 + 2) * 20
# (1 + 2 + 1 + 1 + 1... + 1) * 20
# (1 + 1 + 1 ... + 1) * 20
# My solution
# for N*M, the recursion tree uses O(log(min(N, M))) space
# O(logN) time
def multiply(num1: int, num2: int) -> int:
    smaller = num1 if num1 <= num2 else num2
    bigger = num2 if num1 <= num2 else num1
return multiply_rec(smaller, bigger)
def multiply_rec(smaller: int, bigger: int) -> int:
if smaller == 0: return 0
if smaller == 1: return bigger
rest = 0
if (smaller & 1 == 1): # if smaller is odd
rest = bigger
        smaller -= 1  # drop one to make it even; that one copy of bigger stays in rest
    sum = multiply_rec(smaller >> 1, bigger)  # halve smaller
    return sum + sum + rest  # double the result back, then add the remainder
print(multiply(0, 0))
print(multiply(11, 0))
print(multiply(1, 2))
print(multiply(2, 2))
print(multiply(17, 21)) # 357
print(multiply(22, 10))
print(multiply(17, 20))
|
[
"taras.plavin@gmail.com"
] |
taras.plavin@gmail.com
|
6c3af5545e6bfcfdd613ef4bc3f7d871b00511c2
|
641a0e2db97e87a712f9d8bfbc43ada02dcfa707
|
/wsgi/openshift/photo_album/views.py
|
19b1113b959ad524531713ac4acb58042299a3a5
|
[] |
no_license
|
sagoyanfisic/openshift-cloudinary-django-sample
|
bf5ac2044be1eb493e6d64009d82f9eca296dd35
|
46f22bb4a9c251640e1c3b939e666384118e3e13
|
refs/heads/master
| 2021-01-10T03:55:46.352982
| 2016-04-05T22:40:23
| 2016-04-05T22:40:23
| 55,558,605
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
import json
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from cloudinary import api
from .models import Profile
def filter_nones(d):
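    # e.g. filter_nones({'width': 120, 'effect': None}) -> {'width': 120}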
    return dict((k, v) for k, v in d.items() if v is not None)
def list(request):
defaults = dict(format="jpg", height=150, width=150)
samples = [
dict(width = 120, height =120 ,
crop = 'thumb', gravity = 'face',
radius = 'max', effect = ""),
]
samples = [filter_nones(dict(defaults, **sample)) for sample in samples]
return render(request, 'list.html', dict(photos=Profile.objects.all(), samples=samples))
|
[
"sagoyanfisic1@gmail.com"
] |
sagoyanfisic1@gmail.com
|
258a37565841ee4d731973893e916a4420e4a63c
|
4bb36e95ea92cea2a707be16fefcd873f4fc6f4e
|
/mirrormirror.py
|
ef86f758a4c982bb82847ca1c4122f03b9f04fd4
|
[] |
no_license
|
mdhruv/mirrormirror
|
d734c4959108ee66d4b75393caef4e51d3d6ced8
|
bcb1776b9386f84af957f636295f4487f8c7b122
|
refs/heads/master
| 2021-01-20T11:17:07.023343
| 2019-02-07T03:43:53
| 2019-02-07T03:43:53
| 55,566,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
import os
import urllib
import jinja2
import webapp2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class MainPage(webapp2.RequestHandler):
def get(self):
greeting = 'Meow Meow Meow'
template_values = {
'greeting': greeting,
}
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render(template_values))
app = webapp2.WSGIApplication([
('/', MainPage),
], debug=True)
|
[
"mikhail@Mikhails-MacBook-Pro.local"
] |
mikhail@Mikhails-MacBook-Pro.local
|
bca9b2cf1b50fa691d5a504ec98d956b07a26fdc
|
09f7fd68c3b0f898cd04ea88daca2a42ca19908b
|
/mapss/celery.py
|
4c26aee5e4d4fa367175465c5a70ea2741a283ad
|
[] |
no_license
|
MPI-MAPSS/MAPSSdb
|
512656b5d0c73ae019ea3acbcaee12617470cd77
|
982394c95c615a135ec6171c819c3f6eb29b56f7
|
refs/heads/master
| 2023-04-16T06:04:10.286241
| 2023-02-11T13:34:48
| 2023-02-11T13:34:48
| 600,424,257
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
import platform
if platform.system().lower() == "windows":
os.environ.setdefault("FORKED_BY_MULTIPROCESSING", "1")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mapss.settings')
app = Celery('mapss')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
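# Hedged companion sketch: autodiscover_tasks() above will pick up a module
# such as myapp/tasks.py (hypothetical app name) containing, e.g.:
#
#     from celery import shared_task
#
#     @shared_task
#     def add(x, y):
#         return x + y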
|
[
"root@mpi-arches.server.shh.mpg.de"
] |
root@mpi-arches.server.shh.mpg.de
|
136cf267494f11774e3f8639873ca55c73edb135
|
3880b3a05a9781350a32a3b2ca7f6d8313c0a0e0
|
/Day6/wordcount/mycount/views.py
|
8475ad74c857c2831a69467f15c9383eba7bd9f6
|
[] |
no_license
|
roseline0912/LikeLion
|
60dc179b45460bd7ecd1aa0154f1512faf44feb9
|
9f2a3386544bd6e7f928494328ff8d8ca9d6feb1
|
refs/heads/master
| 2022-11-13T03:14:13.307939
| 2020-07-04T07:15:23
| 2020-07-04T07:15:23
| 274,402,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 605
|
py
|
from django.shortcuts import render
# Create your views here.
def index(request):
context ={}
if request.GET.get("myword"):
mysentence = request.GET.get("myword")
context["wordlen"] = len(mysentence.replace(" ", ""))
myword = mysentence.split(" ")
checkcnt = dict()
for i in myword:
if i in checkcnt:
checkcnt[i] += 1
else:
checkcnt[i] = 1
context["checkcnt"] =checkcnt
return render(request,'index.html',context)
|
[
"roseline0912@likelion.org"
] |
roseline0912@likelion.org
|
b979ee45340f7f2fc214f705b02ac0390ba4212d
|
2ba8b045dd9db51ca14c17c6e04383f113532445
|
/src/collective/saml2/testing.py
|
9f37cc920cd2c762d3edd8bb620cc36db10bf752
|
[] |
no_license
|
collective/collective.saml2
|
04c5cb1519108c8ccd95c827065d83afd5a33e57
|
1bb2eb09bed6201b2a48fb0dfa68946be6e03174
|
refs/heads/master
| 2023-08-28T11:40:53.034492
| 2022-11-22T14:51:23
| 2022-11-22T14:51:23
| 15,853,111
| 5
| 6
| null | 2022-11-22T14:51:24
| 2014-01-12T23:18:22
|
Python
|
UTF-8
|
Python
| false
| false
| 865
|
py
|
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.app.testing import IntegrationTesting
from plone.app.testing import applyProfile
from zope.configuration import xmlconfig
class CollectiveSAML2(PloneSandboxLayer):
defaultBases = (PLONE_FIXTURE, )
def setUpZope(self, app, configurationContext):
# Load ZCML for this package
import collective.saml2
xmlconfig.file('configure.zcml',
collective.saml2,
context=configurationContext)
def setUpPloneSite(self, portal):
applyProfile(portal, 'collective.saml2:default')
COLLECTIVE_SAML2_FIXTURE = CollectiveSAML2()
COLLECTIVE_SAML2_INTEGRATION_TESTING = \
IntegrationTesting(bases=(COLLECTIVE_SAML2_FIXTURE, ),
name="CollectiveSAML2:Integration")
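# A hedged usage sketch (hypothetical test case following plone.app.testing
# conventions; not part of the original file):
#
#     import unittest
#
#     class ExampleIntegrationTest(unittest.TestCase):
#         layer = COLLECTIVE_SAML2_INTEGRATION_TESTING
#
#         def test_portal(self):
#             portal = self.layer['portal']
#             self.assertIsNotNone(portal)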
|
[
"software@pretaweb.com"
] |
software@pretaweb.com
|
90ec448ee163c8b2dbceef7d3cf6a9bddda79de8
|
8e7d45ad0cd018a2cb6a12b077c93794a933959b
|
/leet/0904-fruit-into-baskets/fruit-into-baskets.py
|
8753c71eb955ef01cfeaf957ef3b78977c3eab8f
|
[] |
no_license
|
lugy-bupt/algorithm
|
550a28060615d824de34c66c117f76a83e07b708
|
43b663c125dfa1f78a0936f5caa601407cfb39f4
|
refs/heads/master
| 2020-04-10T12:22:39.765248
| 2019-07-09T08:09:18
| 2019-07-09T08:09:18
| 161,020,432
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 844
|
py
|
from typing import List

class Solution:
def totalFruit(self, tree: List[int]) -> int:
def getFruit(start: int) -> int:
ret = 0
dicts = {}
for i in range(start, len(tree)):
if tree[i] not in dicts and len(dicts) == 2:
break
dicts[tree[i]] = dicts.get(tree[i], 0) + 1
for i in dicts.values():
ret += i
return ret
ret = 0
i = 0
while i < len(tree):
if len(tree) - i <= ret:
break
ret = max(ret, getFruit(i))
while i + 1 < len(tree) and tree[i] == tree[i + 1]:
i += 1
i += 1
return ret
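# Hedged sanity checks (examples from the original LeetCode 904 statement):
# Solution().totalFruit([1, 2, 1]) -> 3
# Solution().totalFruit([0, 1, 2, 2]) -> 3
# Solution().totalFruit([1, 2, 3, 2, 2]) -> 4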
|
[
"noreply@github.com"
] |
lugy-bupt.noreply@github.com
|
1f5cf71313699b12469422f9ba4438ec8d5ae895
|
bd7aeb7d1b9d8aedbad2510015235a6e95f8f834
|
/src/mbox_move.py
|
020b0a9e0894eb5d6ee53afd1617fad1ed4159c2
|
[
"MIT"
] |
permissive
|
pzia/keepmydatas
|
7f60f7a5128380b74a7349ac9cb4125f2b938bbb
|
3783909119e7ece986a92b9e56ec53cddeb924e3
|
refs/heads/master
| 2021-05-19T22:48:15.389461
| 2020-12-25T11:24:39
| 2020-12-25T11:24:39
| 32,166,575
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,474
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Parse tree, find matching MBOX files, and move them info a new folder"""
import KmdCmd
import KmdFiles
import KmdMbox
import os
import re
import logging
class KmdFilesMove(KmdCmd.KmdCommand):
regexp = None
def extendParser(self):
super(KmdFilesMove, self).extendParser()
#Extend parser
self.parser.add_argument('tree', metavar='</path/to/tree>', nargs=1, help='The source tree')
        self.parser.add_argument('regexp', metavar='<regexp>', default='.*(thunderbird|mozilla|message|mail|evolution|mbox|imap|lotus).*', nargs=1, help='Perl regexp matching file paths (Example: .*thunderbird.*)')
self.parser.add_argument('folder', metavar='</path/to/dest>', nargs=1, help='Folder to put matching files')
def run(self):
self.regexp = re.compile(self.args.regexp[0], re.IGNORECASE)
logging.info("Parsing %s", self.args.tree[0])
for root, _, files in os.walk(self.args.tree[0]):
logging.debug("Walking in %s", root)
for name in files:
pname = os.path.join(root, name)
if self.regexp.match(pname) and KmdMbox.isFileMbox(pname):
logging.debug("Found %s", name)
dname = os.path.join(self.args.folder[0], name)
KmdFiles.fileMoveRename(pname, dname, self.args.doit)
if __name__ == "__main__":
cmd = KmdFilesMove(__doc__)
cmd.run()
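# Hedged CLI sketch (the three positionals come from extendParser above; a
# --doit flag is assumed to be defined by the KmdCmd base class, since the
# code reads self.args.doit):
#   python mbox_move.py /path/to/tree '.*thunderbird.*' /path/to/dest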
|
[
"paparazzia@gmail.com"
] |
paparazzia@gmail.com
|
3ed3dd12b61c0bfff33ee52c0e26c742ceb72636
|
66ba8fe37044b65313164a98614b28ef4a220873
|
/testnmap/libnmap.py
|
8bf4bff11394388bb9ce9fd29ef7a2b70570f945
|
[] |
no_license
|
eroa/reconator
|
022d64b59882427064738bbf242b82a56c8de760
|
33c767155508ec49a5536977f8e16a06823c3853
|
refs/heads/master
| 2021-10-27T04:15:20.383146
| 2019-04-15T22:20:48
| 2019-04-15T22:20:48
| 30,401,671
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,215
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import libnmap
from libnmap.process import NmapProcess
from libnmap.parser import NmapParser, NmapParserException
import os
import sys
import subprocess
import multiprocessing
from multiprocessing import Process, Queue
# start a new nmap scan on localhost with some specific options
def do_scan(targets, options):
parsed = None
nmproc = NmapProcess(targets, options)
rc = nmproc.run()
if rc != 0:
print("nmap scan failed: {0}".format(nmproc.stderr))
print(type(nmproc.stdout))
try:
parsed = NmapParser.parse(nmproc.stdout)
except NmapParserException as e:
print("Exception raised while parsing scan: {0}".format(e.msg))
return parsed
# print scan results from a nmap report
def print_scan(nmap_report):
print("Starting Nmap {0} ( http://nmap.org ) at {1}".format(
nmap_report.version,
nmap_report.started))
for host in nmap_report.hosts:
if len(host.hostnames):
tmp_host = host.hostnames.pop()
else:
tmp_host = host.address
print("Nmap scan report for {0} ({1})".format(
tmp_host,
host.address))
print("Host is {0}.".format(host.status))
print(" PORT STATE SERVICE")
for serv in host.services:
pserv = "{0:>5s}/{1:3s} {2:12s} {3}".format(
str(serv.port),
serv.protocol,
serv.state,
serv.service)
if len(serv.banner):
pserv += " ({0})".format(serv.banner)
print(pserv)
print(nmap_report.summary)
print('# Reconator #')
print('#go, drink coffee#')
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("usage : reconator.py iplist.txt")
    else:
        f = open(sys.argv[1], 'r')
        for scanip in f:
            jobs = []
            report = do_scan(scanip.strip(), "-sS -sV -sC -O -p- -T4 -nvvv -Pn ")
            # p = multiprocessing.Process(targets=do_scan,args=(scanip,))
            # jobs.append(p)
            if report:
                print_scan(report)
            else:
                print("No results returned")
|
[
"toxic@t28.io"
] |
toxic@t28.io
|
4fa016208f4968d005714f2d87dac7a6746a25fb
|
9d84a9bf3d7034438d4da1397a45a604f07d2802
|
/Meshless_Python/rascunho.py
|
a3f1fc52aec3624b244099d00470d0873ef6cf86
|
[] |
no_license
|
CatarinaPinheiro/MetodosSemMalha
|
98470dd7baaf027245133458fef69ce07e425254
|
013688682ac4c0f2899294566913d2b85bcfd164
|
refs/heads/master
| 2020-03-14T12:23:54.916183
| 2018-12-09T13:22:58
| 2018-12-09T13:22:58
| 131,609,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,418
|
py
|
import numpy as np
import weightmatrix as wm
from numpy import linalg as la
import basismatrix as bm
import basis as b
import coefficients as c
import minimumradius as mr
r_data = [[0, 1], [0.5, 1.5], [1, 2], [1.5, 2.5], [2, 3], [2.5, 3.5], [3, 4], [3.5, 4.5], [4, 5]]
contour_point = [[0, 0], [4, 4]]
radius_approximation = mr.get_radius(r_data, r_data[4], 3, contour_point)
print(radius_approximation)
base = b.pde_basis(1)[0]
P = bm.create_basis(base, r_data, r_data[4], radius_approximation)
Pt = np.transpose(P)
Peso = wm.W(r_data, r_data[4], radius_approximation)
print('P = ', P)
print('Pt = ', Pt)
print('W = ', Peso)
A = Pt @ Peso @ P
print('A = ', A)
B_matrix = Pt @ Peso
pt = bm.create_basis(base, [r_data[4]])
print('pt = ', pt)
phi = pt @ la.inv(A) @ B_matrix
print('phi = ', phi)
coef = c.coefficients(r_data, r_data[4], 1, contour_point)
print('coefficients = ', coef)
dptd_ = bm.create_basis(b.pde_basis(1)[1], [r_data[4]])
dWd_ = wm.W(r_data, r_data[4], radius_approximation, {
'order': 1,
'var': 'x'
})
dAd_ = Pt @ dWd_ @ P
dBd_ = Pt @ dWd_
invA = la.inv(A)
d1 = dptd_ @ invA @ B_matrix - pt @ invA @ dAd_ @ invA @ B_matrix + pt @ invA @ dBd_
print('derivative = ', d1)
pde_differential_x = { # Information about pde equation
'order': 1, # order of pde
'var': 'x', # Variable
'base1': b.pde_basis(1)[1],
'base2': b.pde_basis(1)[3]
}
dcoef_1 = c.coefficients(r_data, r_data[4], 1, contour_point, pde_differential_x)
print('coefficients derivative = ', dcoef_1)
dptd_2 = bm.create_basis(b.pde_basis(1)[4], [r_data[4]])
d2Wd_ = wm.W(r_data, r_data[4], radius_approximation, {
'order': 2,
'var': 'y'
})
d2Bd_2 = Pt @ d2Wd_
d2Ad_2 = d2Bd_2 @ P
d2 = dptd_2 @ invA @ B_matrix + np.array([[
2]]) @ pt @ invA @ dAd_ @ invA @ dAd_ @ invA @ B_matrix - pt @ invA @ d2Ad_2 @ invA @ B_matrix + pt @ invA @ d2Bd_2 - np.array(
[[2]]) @ dptd_ @ invA @ dAd_ @ invA @ B_matrix + np.array([[2]]) @ dptd_ @ invA @ dBd_ - np.array(
[[2]]) @ pt @ invA @ dAd_ @ invA @ dBd_
print('d2 =', d2)
pde_differential_xx = { # Information about pde equation
'order': 2, # order of pde
'var': 'x', # Variable
'base1': b.pde_basis(1)[1],
'base2': b.pde_basis(1)[4]
}
coef_2 = c.coefficients(r_data, r_data[4], 1, contour_point, pde_differential_xx)
print('coef_2 = ', coef_2)
print(np.log(2))
print('phi = ', phi)
print('coefficients = ', coef)
|
[
"catarina_pine@hotmail.com"
] |
catarina_pine@hotmail.com
|
260bc80dd5742c558de48ccfb624ce6a0f75a8be
|
6fd6d2f5c68520924670114ddd24f9d7a3888c3f
|
/polymerxtal/visualize.py
|
f4c4205e9c3efa70b317da7335b2b667625c66c0
|
[
"BSD-3-Clause"
] |
permissive
|
lengxupa/polymerxtal
|
571e8bd14b7e89f6cf282a08b2036e556d8664d0
|
7deb455b5ef23991c183489b1f4c0d8f004d4b52
|
refs/heads/master
| 2023-08-17T07:14:19.423912
| 2021-09-17T18:29:48
| 2021-09-17T18:29:48
| 287,057,077
| 1
| 3
|
BSD-3-Clause
| 2021-01-21T19:28:17
| 2020-08-12T16:00:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,986
|
py
|
"""
Functions for visualization of molecules
"""
import numpy as np
import sys
from mpl_toolkits.mplot3d import Axes3D
try:
from ovito.io import import_file
from ovito.vis import Viewport
use_ovito = True
except Exception:
use_ovito = False
try:
import matplotlib.pyplot as plt
except Exception:
from polymerxtal.io import check_nanohub
use_nanohub = check_nanohub()
if not (use_ovito and use_nanohub):
import matplotlib.pyplot as plt
from polymerxtal.data import atom_colors
def draw_molecule(coordinates, symbols, draw_bonds=None, save_location=None, dpi=300):
# Draw a picture of a molecule using matplotlib.
# Create figure
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
# Get colors - based on atom name
colors = []
for atom in symbols:
colors.append(atom_colors[atom])
size = np.array(plt.rcParams["lines.markersize"] ** 2) * 200 / (len(coordinates))
ax.scatter(
coordinates[:, 0],
coordinates[:, 1],
coordinates[:, 2],
marker="o",
edgecolors="k",
facecolors=colors,
alpha=1,
s=size,
)
# Draw bonds
if draw_bonds:
for atoms, bond_length in draw_bonds.items():
atom1 = atoms[0]
atom2 = atoms[1]
ax.plot(
coordinates[[atom1, atom2], 0],
coordinates[[atom1, atom2], 1],
coordinates[[atom1, atom2], 2],
color="k",
)
# Save figure
if save_location:
        plt.savefig(save_location, dpi=dpi)
return ax
def bond_histogram(bond_list, save_location=None, dpi=300, graph_min=0, graph_max=2):
# Draw a histogram of bond lengths based on a bond_list (output from build_bond_list function)
lengths = []
for atoms, bond_length in bond_list.items():
lengths.append(bond_length)
bins = np.linspace(graph_min, graph_max)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.xlabel("Bond Length (angstrom)")
plt.ylabel("Number of Bonds")
ax.hist(lengths, bins=bins)
# Save figure
if save_location:
plt.savefig(save_location, dpi=dpi)
return ax
def ovito_view(sample_path, filename, view="Perspective"):
"""
Use the package ovito to make visualizaitons of molecules.
Parameters
----------
sample_path : str
The path of the file to visualize
filename : str
The name of the output file image
view : str (optional)
The view to use
"""
if use_ovito:
# Import the sample file.
pipeline = import_file(sample_path)
pipeline.source.data.cell.vis.enabled = False
pipeline.source.data.particles.vis.radius = 0.5
pipeline.add_to_scene()
vp = Viewport()
if view == "Perspective":
vp.type = Viewport.Type.Perspective
vp.camera_dir = (-1, -1, -1)
elif view == "Ortho":
vp.type = Viewport.Type.Ortho
vp.camera_dir = (-1, -1, -1)
elif view == "Top":
vp.type = Viewport.Type.Top
elif view == "Bottom":
vp.type = Viewport.Type.Bottom
elif view == "Front":
vp.type = Viewport.Type.Front
elif view == "Back":
vp.type = Viewport.Type.Back
elif view == "Left":
vp.type = Viewport.Type.Left
elif view == "Right":
vp.type = Viewport.Type.Right
vp.zoom_all()
vp.render_image(
size=(800, 600), filename=filename, background=(0, 0, 0), frame=0
)
pipeline.remove_from_scene()
else:
print(
"Cannot use function ovito_view - the package ovito is not installed or cannot be found."
)
def main(sample_path, filename, view):
ovito_view(sample_path, filename, view=view)
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2], sys.argv[3])
|
[
"shen276@purdue.edu"
] |
shen276@purdue.edu
|
7aba85c6ebf81e6b8ed347093357cbff4d827275
|
4f4631bc9fac672badb5e98d0f8907b2eb56266b
|
/test/functional/feature_snapshot_creation.py
|
0322fbea12bd0c4e46ef89a80656f2e9599f6a83
|
[
"MIT"
] |
permissive
|
Ruteri/unit-e
|
28e4b0a9bf2cc5518ed6a9fb79191419d51db67b
|
d3d12508b915986841bd19c4dee9e50dd662a112
|
refs/heads/master
| 2020-05-15T04:32:21.220451
| 2019-04-17T16:25:03
| 2019-04-17T16:25:03
| 182,087,190
| 0
| 0
| null | 2019-04-18T12:51:34
| 2019-04-18T12:51:34
| null |
UTF-8
|
Python
| false
| false
| 6,087
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Unit-e developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test Snapshot Creation
This test checks the following scenarios:
1. node generates snapshots with the expected interval
2. node keeps up to 5 snapshots
3. node keeps at least 3 finalized snapshots
"""
from test_framework.regtest_mnemonics import regtest_mnemonics
from test_framework.test_framework import UnitETestFramework
from test_framework.util import (
assert_equal,
wait_until,
sync_blocks,
assert_finalizationstate,
)
class SnapshotCreationTest(UnitETestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.extra_args = [
['-validating=1'],
[],
]
self.num_nodes = len(self.extra_args)
def run_test(self):
def verify_snapshot_result(res):
if 'snapshot_hash' not in res:
return False
if 'valid' not in res:
return False
return res['valid'] is True
def has_valid_snapshot_for_height(node, height):
res = node.getblocksnapshot(node.getblockhash(height))
return verify_snapshot_result(res)
validator = self.nodes[0]
node = self.nodes[1]
validator.importmasterkey(regtest_mnemonics[0]['mnemonics'])
node.importmasterkey(regtest_mnemonics[1]['mnemonics'])
node.generatetoaddress(1, node.getnewaddress('', 'bech32')) # IBD
# test 1. node generates snapshots with the expected interval
node.generatetoaddress(23, node.getnewaddress('', 'bech32'))
wait_until(lambda: len(node.listsnapshots()) == 5)
assert has_valid_snapshot_for_height(node, 4)
assert has_valid_snapshot_for_height(node, 9)
assert has_valid_snapshot_for_height(node, 14)
assert has_valid_snapshot_for_height(node, 19)
assert has_valid_snapshot_for_height(node, 24)
# test 2. node keeps up to 5 snapshots
node.generatetoaddress(5, node.getnewaddress('', 'bech32'))
assert_equal(node.getblockcount(), 29)
wait_until(lambda: has_valid_snapshot_for_height(node, 29), timeout=10)
assert_equal(len(node.listsnapshots()), 5)
assert has_valid_snapshot_for_height(node, 4) is False
assert has_valid_snapshot_for_height(node, 9)
assert has_valid_snapshot_for_height(node, 14)
assert has_valid_snapshot_for_height(node, 19)
assert has_valid_snapshot_for_height(node, 24)
# disable instant justification
payto = validator.getnewaddress("", "legacy")
txid = validator.deposit(payto, 1500)
self.wait_for_transaction(txid, 10)
self.stop_node(validator.index)
node.generatetoaddress(12, node.getnewaddress('', 'bech32'))
assert_equal(node.getblockcount(), 41)
assert_finalizationstate(node, {'currentDynasty': 6,
'currentEpoch': 9,
'lastJustifiedEpoch': 7,
'lastFinalizedEpoch': 6,
'validators': 1})
wait_until(lambda: has_valid_snapshot_for_height(node, 39), timeout=10)
assert_equal(len(node.listsnapshots()), 5)
assert has_valid_snapshot_for_height(node, 9) is False
assert has_valid_snapshot_for_height(node, 14) is False
assert node.getblocksnapshot(node.getblockhash(19))['snapshot_finalized']
assert node.getblocksnapshot(node.getblockhash(24))['snapshot_finalized']
assert node.getblocksnapshot(node.getblockhash(29))['snapshot_finalized']
assert node.getblocksnapshot(node.getblockhash(34))['snapshot_finalized'] is False
assert node.getblocksnapshot(node.getblockhash(39))['snapshot_finalized'] is False
# test 3. node keeps at least 2 finalized snapshots
node.generatetoaddress(9, node.getnewaddress('', 'bech32'))
wait_until(lambda: has_valid_snapshot_for_height(node, 49), timeout=10)
assert_equal(len(node.listsnapshots()), 5)
assert has_valid_snapshot_for_height(node, 19) is False
assert node.getblocksnapshot(node.getblockhash(24))['snapshot_finalized']
assert node.getblocksnapshot(node.getblockhash(29))['snapshot_finalized']
assert has_valid_snapshot_for_height(node, 34) is False
assert node.getblocksnapshot(node.getblockhash(39))['snapshot_finalized'] is False
assert node.getblocksnapshot(node.getblockhash(44))['snapshot_finalized'] is False
assert node.getblocksnapshot(node.getblockhash(49))['snapshot_finalized'] is False
node.generatetoaddress(5, node.getnewaddress('', 'bech32'))
wait_until(lambda: has_valid_snapshot_for_height(node, 54), timeout=10)
assert_equal(len(node.listsnapshots()), 5)
assert node.getblocksnapshot(node.getblockhash(24))['snapshot_finalized']
assert node.getblocksnapshot(node.getblockhash(29))['snapshot_finalized']
assert has_valid_snapshot_for_height(node, 39) is False
assert node.getblocksnapshot(node.getblockhash(44))['snapshot_finalized'] is False
assert node.getblocksnapshot(node.getblockhash(49))['snapshot_finalized'] is False
node.generatetoaddress(5, node.getnewaddress('', 'bech32'))
wait_until(lambda: has_valid_snapshot_for_height(node, 59), timeout=10)
assert_equal(len(node.listsnapshots()), 5)
assert node.getblocksnapshot(node.getblockhash(24))['snapshot_finalized']
assert node.getblocksnapshot(node.getblockhash(29))['snapshot_finalized']
assert has_valid_snapshot_for_height(node, 44) is False
assert node.getblocksnapshot(node.getblockhash(49))['snapshot_finalized'] is False
assert node.getblocksnapshot(node.getblockhash(54))['snapshot_finalized'] is False
if __name__ == '__main__':
SnapshotCreationTest().main()
|
[
"noreply@github.com"
] |
Ruteri.noreply@github.com
|
2d28e463f1087e08ef742eb7ec1d87f589b99e31
|
3240dd0d68c2df789af6f7da70b6bcf0a0fa0f94
|
/main.py
|
e5ee83b6a0eb617161e4c31654937f74c8c4a3a2
|
[] |
no_license
|
Equilibris/Factorizing
|
0a548c8c05ddc17fdf6c05065d12f3c9d07abb53
|
51e2c7bd86d9c5b396c593f055b9a75fbb089f34
|
refs/heads/master
| 2021-06-19T08:24:44.805674
| 2021-05-07T16:26:24
| 2021-05-07T16:26:24
| 210,669,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
import numbers
# print(numbers.Equation.fromSpecal(23,25,numbers.OpperationFactory.divide).factors)
n = numbers.Number.fromFloat(20)
n2 = numbers.Number.fromFloat(5)
print(n.value,n.fractData)
# print(n2.factors)
# n3 = n/100
# n3 = numbers.Fraction.fromSpecal(1000,10)
f = n / n2
# print(f.value, f.fractData)
# print(n.value,n2.value)
# f = f + numbers.Number.fromFloat(9/-5)
# f = f + numbers.Number.fromInt(-1000)
f = f - n*n2
print(f.value, f.fractData)
|
[
"williambilly42@gmail.com"
] |
williambilly42@gmail.com
|
c72fb300065a7e41cf9584bc194214d698246a47
|
8d50cc4f37c153fcb51de4501f3fa50c00394d9b
|
/test/benchmark/resnet_slim_benchmark.py
|
3d10631b62d37ff457840007840c8b2d4b969844
|
[
"MIT"
] |
permissive
|
liujuanLT/InsightFace_TF
|
dbd239dfdda1866c348e82211932884f73cb3067
|
257b6e0dcf7e7c3523dc7e1c08ba529fab1bf75b
|
refs/heads/master
| 2022-04-27T21:24:01.458277
| 2022-03-17T12:28:15
| 2022-03-17T12:28:15
| 463,040,192
| 0
| 0
|
MIT
| 2022-02-24T06:51:16
| 2022-02-24T06:51:15
| null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
import tensorflow as tf
import tensorflow.contrib.slim.nets as nets
import numpy as np
slim = tf.contrib.slim
resnet = nets.resnet_v1
if __name__ == '__main__':
output_shape = 85164
batch_size = 64
image = tf.placeholder(name='input_x', shape=[None, 224, 224, 3], dtype=tf.float32)
labels = tf.placeholder(name='input_label', shape=[None, output_shape], dtype=tf.float32)
with slim.arg_scope(nets.resnet_utils.resnet_arg_scope()):
resnet_50, end_points = resnet.resnet_v1_50(inputs=image, num_classes=output_shape, scope='resnet_v1_50')
prob = tf.squeeze(resnet_50, axis=[1, 2])
probabilities = tf.reduce_mean(tf.nn.softmax(prob, dim=-1))
losses = tf.norm(tf.subtract(probabilities, labels))
train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(losses)
sess = tf.Session()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
while True:
datasets = np.random.randn(batch_size, 224, 224, 3).astype(np.float32)
datasets_labels = np.random.randn(batch_size, output_shape).astype(np.float32)
losses_val, _ = sess.run([losses, train_op], feed_dict={image: datasets, labels: datasets_labels})
print(losses_val)
|
[
"auroua@yeah.net"
] |
auroua@yeah.net
|
e9868115fb1b17cac45a92abf205614c1dfca74d
|
3223859df7266ce695fc226cac1c189ee1b976ce
|
/ALE_Example1/Controllers_For_Example/Chicken_Controller4/Chicken_Controller4.py
|
30c10dab7e9caeffb3f34423cfc58858f1b8be0f
|
[] |
no_license
|
MGBirch/ALEFramework
|
483936097a0bfd7e91fd29f78de59d2fe3973ef1
|
b5ab5379a11386ecb95cef11bfa186ebee693f22
|
refs/heads/master
| 2023-04-14T04:27:20.069818
| 2021-04-27T12:13:27
| 2021-04-27T12:13:27
| 346,075,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,258
|
py
|
from ALEFramework.LeggedPredator import LeggedPredator
class Chicken(LeggedPredator):
def __init__(self):
mdNames = ['leg1', 'leg2', 'leg3', 'leg4']
turnSpeed = 1.0
forSpeed = 1.0
objName = 'chicken4'
self.food = ['grain', 'grain1', 'grain2', 'grain3']
super().__init__(mdNames, forSpeed, objName)
self.legs = self.getMotorDevices()
self.infPos = float('inf')
self.multiMoveMotorPos(self.legs, self.infPos)
self.setMultiMotorVel(self.legs, 1)
self.setMaxEnergy(100000)
self.setEnergy(100000)
self.setConsumptionEnergy(10000)
self.setFocusAngle(0.1)
def behaviour(self):
while self.robot.step(self.timestep) != -1:
self.energy = self.energy -1
if self.energy > 600000:
self.moveForward()
isObstacle = self.checkObstacle()
if isObstacle:
self.avoidObstacle(isObstacle)
else:
self.predBehaviour()
collided = self.checkEnergyCollision(self.food)
if collided:
self.eat(collided)
chicken = Chicken()
chicken.behaviour()
|
[
"noreply@github.com"
] |
MGBirch.noreply@github.com
|
f4cda1bae13452fdd41043adbee86bf30238644a
|
45380b07df616b8b2a8f5ad7a9ece6a5456d7f33
|
/spiral_copy/spiral_copy.py
|
0991fc5ec4cf79ffff1a4f8fbc7000963435b607
|
[] |
no_license
|
TheBroMoe/pramp-interview-questions
|
dd68707a6b1f8346e9df338deddd4e1eb0247d68
|
b552551adc958bf980c42504866b303d86513aca
|
refs/heads/master
| 2020-03-24T06:13:01.562847
| 2018-09-06T02:24:17
| 2018-09-06T02:24:17
| 142,520,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,614
|
py
|
'''
[ [1, 2, 3, 4, 5],
  [6, 7, 8, 9, 10],
  [11, 12, 13, 14, 15],
  [16, 17, 18, 19, 20] ]
-> [1, 2, 3, 4, 5, 10, 15, 20, ...] in spiral order
output_array = []
1. traverse the first row from 0 to n, appending to output_array
2. append the last element of each remaining row
3. append the last row from last to first
4. append the first element of each remaining row
5. repeat 1, ignoring the already-visited border
pseudo-code:
1. traverse left to right (left_col, right_col)
2. top to bottom (top_row, bot_row)
3. right to left (right_col, left_col)
4. bottom to top (bot_row, top_row)
O(n) time
O(n) space
R = number of rows
C = number of cols
'''
def spiral_copy(inputMatrix):
    total_col = len(inputMatrix[0])
    total_row = len(inputMatrix)
    # inclusive boundaries of the not-yet-visited sub-matrix
    left_col = 0
    right_col = total_col - 1
    top_row = 0
    bot_row = total_row - 1
    output = []
    while top_row <= bot_row and left_col <= right_col:
        # 1. left to right along the top row
        for i in range(left_col, right_col + 1):
            output.append(inputMatrix[top_row][i])
        top_row += 1
        # 2. top to bottom along the right column
        for j in range(top_row, bot_row + 1):
            output.append(inputMatrix[j][right_col])
        right_col -= 1
        # 3. right to left along the bottom row, if a row remains
        if top_row <= bot_row:
            for i in range(right_col, left_col - 1, -1):
                output.append(inputMatrix[bot_row][i])
            bot_row -= 1
        # 4. bottom to top along the left column, if a column remains
        if left_col <= right_col:
            for j in range(bot_row, top_row - 1, -1):
                output.append(inputMatrix[j][left_col])
            left_col += 1
    return output
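# Hedged sanity check (added; the matrix is the one from the notes above):
if __name__ == '__main__':
    m = [[1, 2, 3, 4, 5],
         [6, 7, 8, 9, 10],
         [11, 12, 13, 14, 15],
         [16, 17, 18, 19, 20]]
    # expected: [1, 2, 3, 4, 5, 10, 15, 20, 19, 18, 17, 16, 11, 6, 7, 8, 9, 14, 13, 12]
    print(spiral_copy(m))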
|
[
"kebbi@ualberta.ca"
] |
kebbi@ualberta.ca
|
22b47dad8aa99195f72c644637c87c628c275287
|
1dfb2227a17d71d7bf4f02f96a253c8ac9e36edd
|
/dennisivy/settings.py
|
4c98d4d7d23c43e621a300f5a5bc63cc2f8db4ac
|
[] |
no_license
|
Yohannes27/johannes
|
08c5c68bb3480a2d208b82e76fecf6756f815cf8
|
81cda3cd4cee4cd764639401ea2ba3ae9dea8d7e
|
refs/heads/master
| 2023-06-15T22:11:00.524732
| 2021-07-17T08:59:55
| 2021-07-17T08:59:55
| 386,883,941
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,416
|
py
|
"""
Django settings for dennisivy project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hrj48*mobkzo)s6q+2wfqcg=_=@5c38n&sb4_vlp#bb^zyp_pi'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['yohannisayana.herokuapp.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'base.apps.BaseConfig',
'crispy_forms',
'django_filters',
'ckeditor',
'ckeditor_uploader',
'storages',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dennisivy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dennisivy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        'HOST': 'database endpoint',  # note: ignored by the sqlite3 backend
        'PORT': 'database port'  # note: ignored by the sqlite3 backend
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/images/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/images')
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = "joelawro@gmail.com"
EMAIL_HOST_PASSWORD = 'johnny ceo of gmail'
CKEDITOR_UPLOAD_PATH = 'uploads/'
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'full',
'height': 300,
'width': '100%',
},
}
AWS_QUERYSTRING_AUTH = False
#STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
#DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
#AWS_ACCESS_KEY_ID = 'YOUR-AWS-ACCESS-KEY-ID'
#AWS_SECRET_ACCESS_KEY = 'YOUR_AWS-SECRET-ACCESS-KEY'
#AWS_STORAGE_BUCKET_NAME = 'YOU_BUCKET_NAME'
#AWS_S3_FILE_OVERWRITE = False
#AWS_DEFAULT_ACL = None
|
[
"joelawro@gmail.com"
] |
joelawro@gmail.com
|
093b4ed2cab951bd4c4af98e355f13a85fdf697b
|
3784495ba55d26e22302a803861c4ba197fd82c7
|
/venv/lib/python3.6/site-packages/gensim/similarities/levenshtein.py
|
e517c5121744a6d4dd34b418f439b626454109cb
|
[
"MIT"
] |
permissive
|
databill86/HyperFoods
|
cf7c31f5a6eb5c0d0ddb250fd045ca68eb5e0789
|
9267937c8c70fd84017c0f153c241d2686a356dd
|
refs/heads/master
| 2021-01-06T17:08:48.736498
| 2020-02-11T05:02:18
| 2020-02-11T05:02:18
| 241,407,659
| 3
| 0
|
MIT
| 2020-02-18T16:15:48
| 2020-02-18T16:15:47
| null |
UTF-8
|
Python
| false
| false
| 5,352
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Vit Novotny <witiko@mail.muni.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
This module provides a namespace for functions that use the Levenshtein distance.
"""
from itertools import islice
import logging
from math import floor
from gensim.similarities.termsim import TermSimilarityIndex
logger = logging.getLogger(__name__)
def levdist(t1, t2, max_distance=float("inf")):
"""Get the Levenshtein distance between two terms.
Return the Levenshtein distance between two terms. The distance is a
number between <1.0, inf>, higher is less similar.
Parameters
----------
t1 : {bytes, str, unicode}
The first compared term.
t2 : {bytes, str, unicode}
The second compared term.
max_distance : {int, float}, optional
If you don't care about distances larger than a known threshold, a more
efficient code path can be taken. For terms that are clearly "too far
apart", we will not compute the distance exactly, but we will return
`max(len(t1), len(t2))` more quickly, meaning "more than
`max_distance`".
Default: always compute distance exactly, no threshold clipping.
Returns
-------
int
The Levenshtein distance between `t1` and `t2`.
"""
import Levenshtein
distance = Levenshtein.distance(t1, t2)
if distance > max_distance:
return max(len(t1), len(t2))
return distance
def levsim(t1, t2, alpha=1.8, beta=5.0, min_similarity=0.0):
"""Get the Levenshtein similarity between two terms.
Return the Levenshtein similarity between two terms. The similarity is a
number between <0.0, 1.0>, higher is more similar.
Parameters
----------
t1 : {bytes, str, unicode}
The first compared term.
t2 : {bytes, str, unicode}
The second compared term.
alpha : float, optional
The multiplicative factor alpha defined by Charlet and Damnati (2017).
beta : float, optional
The exponential factor beta defined by Charlet and Damnati (2017).
min_similarity : {int, float}, optional
If you don't care about similarities smaller than a known threshold, a
more efficient code path can be taken. For terms that are clearly "too
far apart", we will not compute the distance exactly, but we will
return zero more quickly, meaning "less than `min_similarity`".
Default: always compute similarity exactly, no threshold clipping.
Returns
-------
float
The Levenshtein similarity between `t1` and `t2`.
Notes
-----
This notion of Levenshtein similarity was first defined in section 2.2 of
`Delphine Charlet and Geraldine Damnati, "SimBow at SemEval-2017 Task 3:
Soft-Cosine Semantic Similarity between Questions for Community Question
Answering", 2017 <http://www.aclweb.org/anthology/S/S17/S17-2051.pdf>`_.
"""
assert alpha >= 0
assert beta >= 0
max_lengths = max(len(t1), len(t2))
if max_lengths == 0:
return 1.0
min_similarity = float(max(min(min_similarity, 1.0), 0.0))
max_distance = int(floor(max_lengths * (1 - (min_similarity / alpha) ** (1 / beta))))
distance = levdist(t1, t2, max_distance)
similarity = alpha * (1 - distance * 1.0 / max_lengths)**beta
return similarity
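# Worked example (added, illustrative): with the defaults alpha=1.8 and beta=5.0,
# levsim("spam", "spas") has distance 1 over max length 4, giving
# 1.8 * (1 - 1 / 4) ** 5 ~= 0.427.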
class LevenshteinSimilarityIndex(TermSimilarityIndex):
"""
Computes Levenshtein similarities between terms and retrieves most similar
terms for a given term.
Notes
-----
This is a naive implementation that iteratively computes pointwise Levenshtein similarities
between individual terms. Using this implementation to compute the similarity of all terms in
real-world dictionaries such as the English Wikipedia will take years.
Parameters
----------
dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
A dictionary that specifies the considered terms.
alpha : float, optional
The multiplicative factor alpha defined by Charlet and Damnati (2017).
beta : float, optional
The exponential factor beta defined by Charlet and Damnati (2017).
threshold : float, optional
Only terms more similar than `threshold` are considered when retrieving
the most similar terms for a given term.
See Also
--------
:func:`gensim.similarities.levenshtein.levsim`
The Levenshtein similarity.
:class:`~gensim.similarities.termsim.SparseTermSimilarityMatrix`
Build a term similarity matrix and compute the Soft Cosine Measure.
"""
def __init__(self, dictionary, alpha=1.8, beta=5.0, threshold=0.0):
self.dictionary = dictionary
self.alpha = alpha
self.beta = beta
self.threshold = threshold
super(LevenshteinSimilarityIndex, self).__init__()
def most_similar(self, t1, topn=10):
similarities = (
(levsim(t1, t2, self.alpha, self.beta, self.threshold), t2)
for t2 in self.dictionary.values()
if t1 != t2
)
most_similar = (
(t2, similarity)
for (similarity, t2) in sorted(similarities, reverse=True)
if similarity > 0
)
return islice(most_similar, topn)
|
[
"luis20dr@gmail.com"
] |
luis20dr@gmail.com
|
14522ad9116c0690e6a9b73bbf98c168a8f15f6e
|
3dddfa7991e905fedb8ea9b8903c17d720f6a4be
|
/config/api_router.py
|
e9bd092e59da7ec5c9be37244fd3bea5312a0d6d
|
[
"MIT"
] |
permissive
|
codescribblr/project-manager-django3
|
9892e05913641dc011eee40512c40efc4fa7aaeb
|
d0dc79e992811eee3e35666acdb16bcafa16d98d
|
refs/heads/master
| 2021-03-07T20:12:47.699515
| 2020-03-19T19:15:32
| 2020-03-19T19:15:32
| 246,294,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
from django.conf import settings
from rest_framework.routers import DefaultRouter, SimpleRouter
from vapor_manager.users.api.views import UserViewSet
if settings.DEBUG:
router = DefaultRouter()
else:
router = SimpleRouter()
router.register("users", UserViewSet)
app_name = "api"
urlpatterns = router.urls
|
[
"codescribblr@gmail.com"
] |
codescribblr@gmail.com
|
ba52a043026d9aa6c70b8c067334f239eca8a97d
|
6ad57cf50ef36037401713b68cd4725bec993392
|
/projects/07/src/lib/translator.py
|
528e9d55697346843ae29863d86cc8953a4c0a9b
|
[] |
no_license
|
mtx2d/nand2tetris
|
76121abee92fd016986a83ef81f5ddacdfbdf755
|
6473fcdacc9d3e06fa454a1435719327ccd03c41
|
refs/heads/master
| 2023-01-28T17:05:38.132896
| 2020-12-09T05:01:36
| 2020-12-09T05:01:36
| 292,430,034
| 0
| 0
| null | 2020-11-30T04:08:37
| 2020-09-03T01:02:21
|
Hack
|
UTF-8
|
Python
| false
| false
| 403
|
py
|
from typing import Generator
from .parser import Parser
from .code_writer import CodeWriter
class Translator:
def __init__(self, input_file: str):
self.parser = Parser(input_file)
self.code_writer = CodeWriter(input_file)
def translate(self) -> Generator[str, None, None]:
for inst in self.parser.parse():
yield self.code_writer.write(inst)
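# Hedged usage sketch (added; "Prog.vm" is a hypothetical input file):
#
#   for asm_line in Translator("Prog.vm").translate():
#       print(asm_line)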
|
[
"matrix2d@outlook.com"
] |
matrix2d@outlook.com
|
0b2fc92c265a62bf6c807bfb777e8324c8d00e99
|
737381d9e433d538a9534ca214cad9378a3d05ef
|
/flask/carmen-python/carmen/resolvers/profile.py
|
916865c20ab08db2b6af0a21dff698bcff322f93
|
[
"BSD-2-Clause"
] |
permissive
|
halflkaka/Toxic-Comment-Detector
|
e9987c4014175e03e482fd0c7f6005df752e7078
|
7aecb2fe8acc0163d19f185a25a3adda20738824
|
refs/heads/master
| 2020-04-10T19:16:41.544661
| 2018-12-12T23:23:58
| 2018-12-12T23:23:58
| 161,229,489
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,767
|
py
|
"""Resolvers based on Twitter user profile data."""
import re
import warnings
from names import *
from resolver import AbstractResolver, register
STATE_RE = re.compile(r'.+,\s*(\w+)')
NORMALIZATION_RE = re.compile(r'\s+|\W')
def normalize(location_name, preserve_commas=False):
"""Normalize *location_name* by stripping punctuation and collapsing
runs of whitespace, and return the normalized name."""
def replace(match):
if preserve_commas and ',' in match.group(0):
return ','
return ' '
return NORMALIZATION_RE.sub(replace, location_name).strip().lower()
@register('profile')
class ProfileResolver(AbstractResolver):
"""A resolver that locates a tweet by matching the tweet author's
profile location against known locations."""
name = 'profile'
def __init__(self):
self.location_name_to_location = {}
def add_location(self, location):
aliases = list(location.aliases)
aliases_already_added = set()
for alias in aliases:
if alias in aliases_already_added:
continue
            if alias in self.location_name_to_location:
                # Duplicate alias: keep the first registration (the original
                # warning is preserved below but left disabled).
                # warnings.warn('Duplicate location name "%s"' % alias)
                pass
            else:
                self.location_name_to_location[alias] = location
# Additionally add a normalized version of the alias
# stripped of punctuation, and with runs of whitespace
# reduced to single spaces.
normalized = normalize(alias)
if normalized != alias:
aliases.append(normalized)
aliases_already_added.add(alias)
def resolve_tweet(self, tweet):
import sys
location_string = tweet.get('user', {}).get('location', '')
if not location_string:
return None
normalized = normalize(location_string)
if normalized in self.location_name_to_location:
return (False, self.location_name_to_location[normalized])
# Try again with commas.
normalized = normalize(location_string, preserve_commas=True)
match = STATE_RE.search(normalized)
if match:
after_comma = match.group(1)
location_name = None
if after_comma in US_STATES or after_comma in COUNTRIES:
location_name = after_comma
elif after_comma in US_STATE_ABBREVIATIONS:
location_name = US_STATE_ABBREVIATIONS[after_comma]
elif after_comma in COUNTRY_CODES:
location_name = COUNTRY_CODES[after_comma]
if location_name in self.location_name_to_location:
return (False, self.location_name_to_location[location_name])
return None
|
[
"yzzhou.2017@hotmail.com"
] |
yzzhou.2017@hotmail.com
|
5aece78f9df30d5b1f02206cef9109a5d8884fce
|
1ebe5a07e7f6260c2c2ceb6ca00dcf2a0341e544
|
/op_impl/built-in/ai_core/tbe/impl/dynamic/sigmoid_grad.py
|
12a4e8060c7c75ec879836ff02245bf134ad404c
|
[] |
no_license
|
gekowa/ascend-opp
|
f5e09905336d85f9974d555d03d37a75cb8185c1
|
5c28a2faf9d2a117ea6f0923efe35fcd53904dd2
|
refs/heads/master
| 2023-04-09T12:14:40.337104
| 2021-04-19T23:00:59
| 2021-04-19T23:00:59
| 359,620,865
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,963
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the Apache License Version 2.0.You may not use
this file except in compliance with the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Apache License for more details at
http://www.apache.org/licenses/LICENSE-2.0
sigmoid_grad
"""
from functools import reduce as reduce_ins
from te import tvm
from te import platform as tbe_platform
import te.lang.dynamic
from topi import generic
from te.platform.shape_classifier import classify
from te.platform.shape_classifier import Mode
from te.utils.op_utils import KERNEL_NAME
from te.utils.op_utils import REQUIRED_INPUT
from te.utils.op_utils import REQUIRED_OUTPUT
from te.utils.op_utils import check_dtype
from te.utils.op_utils import check_op_params
from te.utils.op_utils import variable_shape
from te.utils.op_utils import broadcast_shapes
from te.utils.op_utils import check_elewise_shape_range
from te.utils.error_manager import error_manager_vector
from te.utils.op_utils import refine_shapes_for_broadcast
# General limitation of the reduce size for input shape: 2**30
SHAPE_SIZE_LIMIT = 2 ** 30
# pylint: disable=locally-disabled,too-many-arguments,unused-argument
def sigmoid_grad_compute(x, y, z, kernel_name="sigmoid_grad"):
"""
algorithm : sigmoid grad compute
sigmoid_grad = (sigmoid - sigmoid*sigmoid)*grad
Parameters:
----------
x : a tensor of input data
y : a tensor of grad
z : output dict
kernel_name : cce kernel name, default value is "sigmoid_grad"
Returns
-------
a tenosr
"""
dtype = x.dtype.lower()
cast_support = tbe_platform.cce_conf.api_check_support(
"te.lang.cce.cast_to", "f322f16")
if dtype == "float32" and not cast_support:
error_detail = "float32 transfer to float16 is only supported on mini and cloud platform!"
error_manager_vector.raise_err_two_input_dtype_invalid(kernel_name, "x", 'y', error_detail)
vmul_support = tbe_platform.cce_conf.api_check_support(
"te.lang.cce.vmul", "float32")
vsub_support = tbe_platform.cce_conf.api_check_support(
"te.lang.cce.vsub", "float32")
if dtype == "float16":
x = te.lang.dynamic.cast_to(x, "float32")
y = te.lang.dynamic.cast_to(y, "float32")
sigmoid_square = te.lang.dynamic.vmul(x, x)
if dtype == "float32" and not vmul_support:
sigmoid_square = te.lang.dynamic.cast_to(sigmoid_square, "float16")
tensor_sub = te.lang.dynamic.vsub(x, sigmoid_square)
if dtype == "float32" and not vsub_support:
tensor_sub = te.lang.dynamic.cast_to(tensor_sub, "float16")
res = te.lang.dynamic.vmul(tensor_sub, y)
if dtype == "float16":
res = te.lang.dynamic.cast_to(res, "float16")
return res
@te.op.register_operator("SigmoidGrad")
@check_op_params(REQUIRED_INPUT, REQUIRED_INPUT, REQUIRED_OUTPUT, KERNEL_NAME)
def sigmoid_grad(x,
dx,
out,
kernel_name="sigmoid_grad"):
"""
do sigmoid grad
sigmoid_grad = (sigmoid - sigmoid*sigmoid)*grad
Parameters:
----------
x : dictionary shape of sigmoid input
dx : dictionary shape of grad
out: dictionary output
kernel_name : cce kernel name, default value is "sigmoid_grad_cce"
Returns
-------
None
"""
x_dtype = x.get("dtype").lower()
dx_dtype = dx.get("dtype").lower()
check_list = ("float16", "float32")
check_dtype(x_dtype, check_list, param_name="input_x")
check_dtype(dx_dtype, check_list, param_name="input_dx")
check_elewise_shape_range([x, dx], support_broadcast=False)
if x_dtype != dx_dtype:
error_manager_vector.raise_err_inputs_dtype_not_equal(kernel_name, "x", "dx",
x_dtype, dx_dtype)
ins = classify([x, dx], Mode.ELEWISE)
schedules, tensors = [], []
for (sig, dx) in ins:
with te.op.compute():
shape_sig, shape_dx = variable_shape([sig, dx], support_broadcast=False)
shape_sig, shape_dx = refine_shapes_for_broadcast(shape_sig, shape_dx)
tensor_sig = tvm.placeholder(shape_sig, x_dtype, "tensor_x")
tensor_dx = tvm.placeholder(shape_dx, dx_dtype, "tensor_dx")
res = sigmoid_grad_compute(tensor_sig, tensor_dx, out, kernel_name)
tensors.append([tensor_sig, tensor_dx, res])
with tvm.target.cce():
sch = generic.auto_schedule(res)
schedules.append(sch)
config = {"name": kernel_name, "tensor_list": tensors}
te.lang.dynamic.build(schedules, config)
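# Added, illustrative cross-check of the formula with plain NumPy (it does not
# touch the TBE/TVM stack; every name below is hypothetical):
#
#   import numpy as np
#   s = 1.0 / (1.0 + np.exp(-np.random.randn(8).astype(np.float32)))
#   grad = np.ones_like(s)
#   ref = (s - s * s) * grad   # same identity as sigmoid_grad_compute, with s
#                              # playing the role of the operator's `x` input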
|
[
"gekowa@gmail.com"
] |
gekowa@gmail.com
|
cc0bff1a878ca2c0d021572fd5105692d941f13d
|
bf6a60eb8467064fdf65628d3d94174422650a51
|
/agenda/settings.py
|
f7e10b16ee080185c6344a87cd18085aa595afa2
|
[] |
no_license
|
saracris33/agenda
|
fa7fc276d51320a714594d06fac5c232d6de903e
|
18982fbb34982410174b012a04ac94a4ca62e5c1
|
refs/heads/master
| 2023-04-03T00:20:12.085075
| 2021-03-28T23:01:46
| 2021-03-28T23:01:46
| 352,085,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,073
|
py
|
"""
Django settings for agenda project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z(b1x*9w5*nt=12+ya@hn=qx+*q7om7o+v-_3rd)wm3-e+7!ep'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'agenda.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'agenda.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"saracristina33@gmail.com"
] |
saracristina33@gmail.com
|
122ff0d6144ede7500d7b14ce445d07e8d7ac56c
|
eef8ad0c5e6b7b69a4077595e0b10e511cf6a140
|
/students_control/wsgi.py
|
21756654b85f65a6c642be8b90342629c4ce5be5
|
[] |
no_license
|
serg-pe/students
|
7528d2c6b75e980b2eba8eb84cc43e263377487a
|
f7a37c8f7edf29f735a711dc1cad17c08cd69284
|
refs/heads/master
| 2023-07-02T22:21:25.712895
| 2021-08-05T14:06:06
| 2021-08-05T14:06:06
| 393,065,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
WSGI config for students_control project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'students_control.settings')
application = get_wsgi_application()
|
[
"pechurin.serg@gmail.com"
] |
pechurin.serg@gmail.com
|
b927b5d25d217da5c4c4e64c5bd351c503b06041
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-resource/azure/mgmt/resource/resources/v2018_02_01/models/plan.py
|
594d670d723c414dc75c3cbcbd6b5e220e0f45a7
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,543
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Plan(Model):
"""Plan for the resource.
:param name: The plan ID.
:type name: str
:param publisher: The publisher ID.
:type publisher: str
:param product: The offer ID.
:type product: str
:param promotion_code: The promotion code.
:type promotion_code: str
:param version: The plan's version.
:type version: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'publisher': {'key': 'publisher', 'type': 'str'},
'product': {'key': 'product', 'type': 'str'},
'promotion_code': {'key': 'promotionCode', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Plan, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.publisher = kwargs.get('publisher', None)
self.product = kwargs.get('product', None)
self.promotion_code = kwargs.get('promotion_code', None)
self.version = kwargs.get('version', None)
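# Hedged usage sketch (added; all values are placeholders):
#
#   plan = Plan(name='plan-id', publisher='publisher-id', product='offer-id')
#   # msrest serializes via _attribute_map, e.g. promotion_code -> promotionCode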
|
[
"noreply@github.com"
] |
xiafu-msft.noreply@github.com
|
cbe9093790f695b6df02ae166b045ce24747d369
|
d7024d113fd02d23cefb4909f3138af32c72f885
|
/djlevelfive/basic_app/urls.py
|
1cac84a9bf20eb101880c1d521695b7fd89509af
|
[] |
no_license
|
jegawaran/django_learning_login
|
853da3e58e2bd8d542859796b0e4a0d4c1ce6f25
|
7e93a8d14b3bbc6f11bb8ea7deb71d5e934400dd
|
refs/heads/master
| 2021-05-26T02:00:19.944086
| 2020-04-09T14:01:05
| 2020-04-09T14:01:05
| 254,009,311
| 0
| 0
| null | 2020-04-09T14:01:06
| 2020-04-08T06:54:52
|
Python
|
UTF-8
|
Python
| false
| false
| 252
|
py
|
from django.contrib import admin
from django.urls import path,include
from basic_app import views
app_name = 'basic_app'
urlpatterns = [
path('user_login',views.user_login,name='user_login'),
path('register',views.register,name='register')
]
|
[
"jagatheesjothi@gmail.com"
] |
jagatheesjothi@gmail.com
|
7c0b71d0ee54f8a018fa020cc6f7065ad91a921a
|
11c0d9c266602d204168fc1320fe5e8810045e0b
|
/number guess game.py
|
671be84a1b5cf7c4e5d3901fe1172e829999e462
|
[] |
no_license
|
hiteshkrypton/Python-Programs
|
44d7b463a0d665c3ab6fc7eb6d24a4296121a848
|
28cf3d832d93e56fcc62bdf265b765865d0c1c14
|
refs/heads/master
| 2023-01-03T23:37:05.559438
| 2020-10-06T20:21:05
| 2020-10-06T20:21:05
| 301,844,360
| 1
| 0
| null | 2020-10-25T15:05:57
| 2020-10-06T20:17:32
|
Python
|
UTF-8
|
Python
| false
| false
| 466
|
py
|
# share https://youtu.be/x084tfX4JnI
# subscribed by armor-boop as @varunpariharr ig handel
#plz watch this video https://youtu.be/x084tfX4JnI from codehouse and subscribe for further amazing videos related to coding programming
import random
r = random.randint(1,20)
while(True):
    inp = int(input("Guess a number between 1 and 20: "))  # prompt added for clarity
    if inp < r:
        print("oops, try a greater number")
    elif inp > r:
        print("oops, try a smaller number")
    else:
        print("congrats, you chose the right number")
        break
|
[
"noreply@github.com"
] |
hiteshkrypton.noreply@github.com
|
b5aa8609ba4bfc1316d746246c92c28fcc417aac
|
63bf17b9710c983483c920bcef40366a906caca3
|
/myenv/bin/easy_install-2.7
|
d01121c657b04c108bb9601f005a925b17b7b649
|
[] |
no_license
|
yamen225/tryDjango18
|
63a97ad7c1c4be30c41342fb9e4be3cfb9c1cd94
|
c5d72842abdc1e15e7a5a2de586d09fd87b75792
|
refs/heads/master
| 2020-03-22T14:10:10.754849
| 2018-07-08T10:26:01
| 2018-07-08T10:26:01
| 140,158,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
7
|
#!/home/yamen/WorkSpace/Django1.9/Development/myenv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"yamen225@gmail.com"
] |
yamen225@gmail.com
|
1c502f728773194232d669a486066cf73f92df2b
|
0ff7dfab354e475b978a371c0dc35c3246725a1e
|
/lofd/migrations/0049_auto_20200524_2110.py
|
5a778ee1a7bcd36fdc9e7ab9e30e924250a03485
|
[] |
no_license
|
ozgeyilmazh/lofd
|
0f15305a8f95ed0bac8ae32da43a901f888b53c3
|
ac15f788d34c684b5d45891d8fcbeb380c78352c
|
refs/heads/main
| 2023-01-01T00:13:40.103619
| 2020-10-18T23:12:42
| 2020-10-18T23:12:42
| 287,809,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 778
|
py
|
# Generated by Django 2.2.4 on 2020-05-24 21:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lofd', '0048_book_cover_image'),
]
operations = [
migrations.CreateModel(
name='Watch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, default='', max_length=200, null=True)),
('cover_image', models.ImageField(blank=True, null=True, upload_to='movies')),
],
),
migrations.DeleteModel(
name='Movie',
),
migrations.DeleteModel(
name='Series',
),
]
|
[
"onder.yilmz@gmail.com"
] |
onder.yilmz@gmail.com
|
f802008470611389229998d8cb6b02f793426a34
|
aadb2d7175273bd99520998d3970456c86c15de9
|
/binder.py
|
eaf321def1404c14d1b5a4cfb4f72084d69330cb
|
[
"BSD-3-Clause"
] |
permissive
|
gahughes/Apertif-Visualization-Example-Docker
|
94bc9a8bf5a2a1dd392f8b45182022ba217b6078
|
c541a0660df2c5427d10b9576fde20b99b806cd4
|
refs/heads/main
| 2023-08-17T00:45:48.888234
| 2021-09-27T11:20:21
| 2021-09-27T11:20:21
| 410,800,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 844
|
py
|
"""Install code on Binder.
This script is executed from Dockerfile configuration file
It installs software dependencies declared in environment.yml
in the docker container built for the Binder service.
"""
import yaml
import conda.cli
import subprocess
import sys
with open("environment.yml") as stream:
content = yaml.safe_load(stream)
for chan in content['channels']:
print("RUN mamba config --add channels {}".format(chan))
conda.cli.main('mamba', 'config', '--add', 'channels', chan)
for pack in content['dependencies']:
if isinstance(pack, str):
print("RUN mamba install -q -y {}".format(pack))
conda.cli.main('conda', 'install', '-y', '-q', pack)
else:
print("RUN pip install {}".format(pack['pip'][0]))
subprocess.call([sys.executable, "-m", "pip", "install", pack['pip'][0]])
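# For reference (added): a minimal environment.yml this script can consume;
# the channel and package names below are placeholders:
#
#   channels:
#     - conda-forge
#   dependencies:
#     - numpy
#     - pip:
#         - some-pypi-package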
|
[
"gareth.hughe@cta-observatory.org"
] |
gareth.hughe@cta-observatory.org
|
0328bd33ee83d556bf57c8a718f7cbaa035b7531
|
5d058397ba3f90d3c9864076dd2908dd43c0b70a
|
/weather.py
|
dcd73f7d42fe71cbd97e27bb95e50569b80bd5cb
|
[] |
no_license
|
R-Yamaga/git_lesson
|
2604628086f368656d39a98520da504cdcb43299
|
08b588f7996ad23378865792c81d2dd6bd1bf223
|
refs/heads/master
| 2023-03-07T01:25:27.536331
| 2021-02-20T06:41:40
| 2021-02-20T06:41:40
| 340,559,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
import requests
import json
from pprint import pprint
API_KEY = "de0493ad02b5f68f7866c1ea3a8819e4"
BASE_URL = "http://api.openweathermap.org/data/2.5/weather?units=metric&lang=ja"
city = "Kobe,jp"
response = requests.get(BASE_URL + "&q={}&APPID={}".format(city, API_KEY))
weather_data = json.loads(response.text)
# pprint(weather_data)
city = weather_data['name']
weather = weather_data['weather'][0]['description']
result = "{}の天気は{}です。".format(city, weather)
print(result)
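# A defensive variant (added, illustrative): an invalid key or unknown city
# returns an error payload without the 'weather' key, so check first:
#
#   if response.status_code != 200:
#       print("API error:", weather_data.get("message", "unknown"))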
|
[
"yamaga.3939.love@gmail.com"
] |
yamaga.3939.love@gmail.com
|
9f77649ff464ce56850ba4c2959988042dc566e6
|
941c37c639551010fb99dfa29d0da15589a86cf9
|
/scripts/classification.py
|
4c4e0d0bbdf830389fd32ea0745963c6607179cd
|
[] |
no_license
|
rpezoa/multiW_analysis
|
0d28ed6b9bfb5b335f6e19aaab62f941f893792f
|
d0d6909fecc19c2e1f547f4efc6fb4b59c05f039
|
refs/heads/main
| 2023-01-21T03:39:06.961507
| 2020-11-27T19:27:39
| 2020-11-27T19:27:39
| 314,274,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,547
|
py
|
import numpy as np
from sklearn.ensemble import RandomForestClassifier
import lightgbm as lgb
import time
from sklearn.model_selection import RandomizedSearchCV
def classification(X_train, y_train):
print("Running classification model ...")
start = time.time()
d_train = lgb.Dataset(X_train, label=y_train)
params = {}
params['learning_rate'] = 0.003
params['boosting_type'] = 'gbdt'
params['objective'] = 'binary'
params['metric'] = ''
params['sub_feature'] = 0.5
params['num_leaves'] = 10
params['min_data'] = 50
params['max_depth'] = 10
clf = lgb.train(params, d_train, 100)
end = time.time()
print(end - start)
return clf
def classification_RF(X_train, y_train):
print("Running classification model ...")
start = time.time()
# random forest model creation
rfc = RandomForestClassifier()
clf = rfc.fit(X_train,y_train)
print("clf.classes_", clf.classes_)
end = time.time()
print(end - start)
return clf
def classification_RF_with_tunning(X_train, y_train,weights=None):
print("Running classification model ...")
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 10, stop = 40, num = 10)] # 200, 2000, 100
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(1, 3, num = 3)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
start = time.time()
rfc = RandomForestClassifier()
# random forest model creation
rf_random = RandomizedSearchCV(estimator = rfc, param_distributions = random_grid, n_iter = 10,
cv = 3, verbose=2, random_state=42, n_jobs = -1)
rf_random.fit(X_train,y_train, sample_weight=weights)
clf = rf_random.best_estimator_
#clf = clf.fit(X_train,y_train)
print("clf.classes_", clf.classes_)
end = time.time()
print(end - start)
return clf
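# Hedged usage sketch (added), with synthetic data matching the signatures above:
#
#   X = np.random.rand(200, 8)
#   y = np.random.randint(0, 2, 200)
#   clf = classification_RF(X, y)
#   print(clf.predict(X[:5]))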
|
[
"rpezoarivera@gmail.com"
] |
rpezoarivera@gmail.com
|
e656df746553b3c6a62b4a2c65f1bf14cff21bed
|
bf842db27fe35c8f4e659682326b4b77bf9b5726
|
/embedding_glove.py
|
7b28503d2ee92628ce560bb9ee0343695e4bcd71
|
[] |
no_license
|
palarunava/deep-learning-exercises
|
79135db506fadd8acd8fe775802e987c642066c8
|
92e195b96a77f731cb767f153308fe05e19a1cbd
|
refs/heads/master
| 2020-04-17T07:38:17.959265
| 2019-02-13T07:49:24
| 2019-02-13T07:49:24
| 166,377,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,063
|
py
|
from numpy import array
from numpy import asarray
from numpy import zeros
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Embedding
#%%
# define documents
docs = ['Well done!',
'Good work',
'Great effort',
'nice work',
'Excellent!',
'Weak',
'Poor effort!',
'not good',
'poor work',
'Could have done better.']
# define class labels
labels = array([1,1,1,1,1,0,0,0,0,0])
#%%
# prepare tokenizer
t = Tokenizer()
t.fit_on_texts(docs)
vocab_size = len(t.word_index) + 1
# integer encode the documents
encoded_docs = t.texts_to_sequences(docs)
print(encoded_docs)
#%%
# pad documents to a max length of 4 words
max_length = 4
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
print(padded_docs)
#%%
# load the whole embedding into memory
embeddings_index = dict()
f = open('./data/glove.6B/glove.6B.100d.txt', encoding='utf8')
for line in f:
values = line.split()
word = values[0]
coefs = asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Loaded %s word vectors.' % len(embeddings_index))
#%%
# create a weight matrix for words in training docs
embedding_matrix = zeros((vocab_size, 100))
for word, i in t.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
#%%
# define model
model = Sequential()
e = Embedding(vocab_size, 100, weights=[embedding_matrix], input_length=4, trainable=False)
model.add(e)
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
# compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
# summarize the model
print(model.summary())
#%%
# fit the model
model.fit(padded_docs, labels, epochs=50, verbose=0)
#%%
# evaluate the model
loss, accuracy = model.evaluate(padded_docs, labels, verbose=0)
print('Accuracy: %f' % (accuracy*100))
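#%%
# Added, illustrative check: words absent from GloVe keep all-zero rows in
# embedding_matrix and can be listed like this:
#
#   oov = [w for w, i in t.word_index.items() if not embedding_matrix[i].any()]
#   print('OOV words:', oov)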
|
[
"pal.arunava83@yahoo.com"
] |
pal.arunava83@yahoo.com
|
4162fc60669d9da300db7416d8d71205d1582779
|
9f1039075cc611198a988034429afed6ec6d7408
|
/tensorflow-stubs/errors/__init__.pyi
|
27774c85b4c35c1cb30753e12fefd8de51da2622
|
[] |
no_license
|
matangover/tensorflow-stubs
|
9422fbb1cb3a3638958d621461291c315f9c6ec2
|
664bd995ef24f05ba2b3867d979d23ee845cb652
|
refs/heads/master
| 2020-05-23T12:03:40.996675
| 2019-05-15T06:21:43
| 2019-05-15T06:21:43
| 186,748,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,614
|
pyi
|
# Stubs for tensorflow.errors (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.python import OpError as OpError
from tensorflow.python.framework.errors import AbortedError as AbortedError, AlreadyExistsError as AlreadyExistsError, CancelledError as CancelledError, DataLossError as DataLossError, DeadlineExceededError as DeadlineExceededError, FailedPreconditionError as FailedPreconditionError, InternalError as InternalError, InvalidArgumentError as InvalidArgumentError, NotFoundError as NotFoundError, OutOfRangeError as OutOfRangeError, PermissionDeniedError as PermissionDeniedError, ResourceExhaustedError as ResourceExhaustedError, UnauthenticatedError as UnauthenticatedError, UnavailableError as UnavailableError, UnimplementedError as UnimplementedError, UnknownError as UnknownError, error_code_from_exception_type as error_code_from_exception_type, exception_type_from_error_code as exception_type_from_error_code, raise_exception_on_not_ok_status as raise_exception_on_not_ok_status
from tensorflow.python.framework.errors_impl import ABORTED as ABORTED, ALREADY_EXISTS as ALREADY_EXISTS, CANCELLED as CANCELLED, DATA_LOSS as DATA_LOSS, DEADLINE_EXCEEDED as DEADLINE_EXCEEDED, FAILED_PRECONDITION as FAILED_PRECONDITION, INTERNAL as INTERNAL, INVALID_ARGUMENT as INVALID_ARGUMENT, NOT_FOUND as NOT_FOUND, OK as OK, OUT_OF_RANGE as OUT_OF_RANGE, PERMISSION_DENIED as PERMISSION_DENIED, RESOURCE_EXHAUSTED as RESOURCE_EXHAUSTED, UNAUTHENTICATED as UNAUTHENTICATED, UNAVAILABLE as UNAVAILABLE, UNIMPLEMENTED as UNIMPLEMENTED, UNKNOWN as UNKNOWN
|
[
"matangover@gmail.com"
] |
matangover@gmail.com
|
1e6046772828baa97b1ac0a5d2a410f1e5a74fb2
|
a885bfe825f42129155841c71b146ed9e254f285
|
/concurrency/process_io.py
|
128ed4ca10ad8bc6f63078bdee6667031c470824
|
[] |
no_license
|
stackeric/practical-python
|
72c3a0210c0206c66ad608cf7f31da8261a3dca4
|
e6f12189b59953da0887afd85cbac6c576406265
|
refs/heads/master
| 2022-12-02T18:36:38.159509
| 2020-08-25T11:12:51
| 2020-08-25T11:12:51
| 287,465,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
import time
from multiprocessing import Process, Queue
def block_task(q):
time.sleep(1)
q.put("Done")
def main():
n = 4
tasks = []
q = Queue()
result = []
for i in range(n):
p = Process(target=block_task, args=(q,))
tasks.append(p)
for i in tasks:
i.start()
for i in tasks:
res = q.get()
result.append(res)
for i in tasks:
i.join()
for r in result:
print(r)
if __name__ == "__main__":
main()
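# Note (added): q.get() yields results in completion order, not submission
# order. A concurrent.futures sketch with the same effect (illustrative):
#
#   from concurrent.futures import ProcessPoolExecutor
#
#   def block_task2(_):
#       time.sleep(1)
#       return "Done"
#
#   with ProcessPoolExecutor(max_workers=4) as ex:
#       print(list(ex.map(block_task2, range(4))))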
|
[
"eric@ericde"
] |
eric@ericde
|
606bcb6ee4260f59f029fccb186d5191429fe6bf
|
de84f9d44b59aa39a1612c683cf2de150ef59e9b
|
/qurl/migrations/0001_initial.py
|
43c0b8f99a45263bfb2a764182ab58f5e645ba99
|
[] |
no_license
|
evrenesat/uygulamatik
|
25d7617d4ae6c10623b30d4a57731242efa9a4a7
|
c90da279c6571549e35f51e097524752d9cc2936
|
refs/heads/master
| 2021-01-19T05:39:26.908079
| 2014-06-05T21:57:00
| 2014-06-05T21:57:00
| 65,093,382
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,381
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'QRcode'
db.create_table(u'qurl_qrcode', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('code', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
('timestamp', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal(u'qurl', ['QRcode'])
def backwards(self, orm):
# Deleting model 'QRcode'
db.delete_table(u'qurl_qrcode')
models = {
u'qurl.qrcode': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'QRcode'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['qurl']
|
[
"esat.ozkan@grow-is.com"
] |
esat.ozkan@grow-is.com
|
d61f4f643c07997b94870339b9e90ad794a8f9fc
|
4769bb5482141b84898a33c4376394389d540c15
|
/docker/models/services.py
|
590729eb19fa03d913f1306d0c27de12ab258293
|
[] |
no_license
|
oraluisarias/dockerPSM
|
a92052a3f0641acdb57b94aba26934754927ddcc
|
4b66ffb9ff061c1661b880fa8f62a6bec94464c0
|
refs/heads/master
| 2021-01-21T18:58:24.671171
| 2017-05-22T21:55:06
| 2017-05-22T21:55:06
| 92,103,006
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,961
|
py
|
import copy
from docker.errors import create_unexpected_kwargs_error
from docker.types import TaskTemplate, ContainerSpec
from .resource import Model, Collection
class Service(Model):
"""A service."""
id_attribute = 'ID'
@property
def name(self):
"""The service's name."""
return self.attrs['Spec']['Name']
@property
def version(self):
"""
The version number of the service. If this is not the same as the
server, the :py:meth:`update` function will not work and you will
need to call :py:meth:`reload` before calling it again.
"""
return self.attrs.get('Version').get('Index')
def remove(self):
"""
Stop and remove the service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_service(self.id)
def tasks(self, filters=None):
"""
List the tasks in this service.
Args:
filters (dict): A map of filters to process on the tasks list.
Valid filters: ``id``, ``name``, ``node``,
``label``, and ``desired-state``.
Returns:
(:py:class:`list`): List of task dictionaries.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if filters is None:
filters = {}
filters['service'] = self.id
return self.client.api.tasks(filters=filters)
def update(self, **kwargs):
"""
Update a service's configuration. Similar to the ``docker service
update`` command.
Takes the same parameters as :py:meth:`~ServiceCollection.create`.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# Image is required, so if it hasn't been set, use current image
if 'image' not in kwargs:
spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
kwargs['image'] = spec['Image']
create_kwargs = _get_create_service_kwargs('update', kwargs)
return self.client.api.update_service(
self.id,
self.version,
**create_kwargs
)
class ServiceCollection(Collection):
"""Services on the Docker server."""
model = Service
def create(self, image, command=None, **kwargs):
"""
Create a service. Similar to the ``docker service create`` command.
Args:
image (str): The image name to use for the containers.
command (list of str or str): Command to run.
args (list of str): Arguments to the command.
constraints (list of str): Placement constraints.
container_labels (dict): Labels to apply to the container.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
env (list of str): Environment variables, in the form
``KEY=val``.
hostname (string): Hostname to set on the container.
labels (dict): Labels to apply to the service.
log_driver (str): Log driver to use for containers.
log_driver_options (dict): Log driver options.
mode (str): Scheduling mode for the service (``replicated`` or
``global``). Defaults to ``replicated``.
mounts (list of str): Mounts for the containers, in the form
``source:target:options``, where options is either
``ro`` or ``rw``.
name (str): Name to give to the service.
networks (list of str): List of network names or IDs to attach
the service to. Default: ``None``.
resources (Resources): Resource limits and reservations.
restart_policy (RestartPolicy): Restart policy for containers.
secrets (list of :py:class:`docker.types.SecretReference`): List
of secrets accessible to containers for this service.
stop_grace_period (int): Amount of time to wait for
containers to terminate before forcefully killing them.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``
user (str): User to run commands as.
workdir (str): Working directory for commands to run.
Returns:
(:py:class:`Service`) The created service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
kwargs['image'] = image
kwargs['command'] = command
create_kwargs = _get_create_service_kwargs('create', kwargs)
service_id = self.client.api.create_service(**create_kwargs)
return self.get(service_id)
def get(self, service_id):
"""
Get a service.
Args:
service_id (str): The ID of the service.
Returns:
(:py:class:`Service`): The service.
Raises:
:py:class:`docker.errors.NotFound`
If the service does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_service(service_id))
def list(self, **kwargs):
"""
List services.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id`` and ``name``. Default: ``None``.
Returns:
(list of :py:class:`Service`): The services.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return [
self.prepare_model(s)
for s in self.client.api.services(**kwargs)
]
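# Hedged usage sketch (added; assumes a reachable swarm manager and mirrors the
# docstrings above):
#
#   import docker
#   client = docker.from_env()
#   svc = client.services.create('nginx:alpine', name='web',
#                                env=['FOO=bar'],
#                                constraints=['node.role == worker'])
#   print(svc.name, len(svc.tasks()))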
# kwargs to copy straight over to ContainerSpec
CONTAINER_SPEC_KWARGS = [
'image',
'command',
'args',
'env',
'hostname',
'workdir',
'user',
'labels',
'mounts',
'stop_grace_period',
'secrets',
]
# kwargs to copy straight over to TaskTemplate
TASK_TEMPLATE_KWARGS = [
'resources',
'restart_policy',
]
# kwargs to copy straight over to create_service
CREATE_SERVICE_KWARGS = [
'name',
'labels',
'mode',
'update_config',
'networks',
'endpoint_spec',
]
def _get_create_service_kwargs(func_name, kwargs):
# Copy over things which can be copied directly
create_kwargs = {}
for key in copy.copy(kwargs):
if key in CREATE_SERVICE_KWARGS:
create_kwargs[key] = kwargs.pop(key)
container_spec_kwargs = {}
for key in copy.copy(kwargs):
if key in CONTAINER_SPEC_KWARGS:
container_spec_kwargs[key] = kwargs.pop(key)
task_template_kwargs = {}
for key in copy.copy(kwargs):
if key in TASK_TEMPLATE_KWARGS:
task_template_kwargs[key] = kwargs.pop(key)
if 'container_labels' in kwargs:
container_spec_kwargs['labels'] = kwargs.pop('container_labels')
if 'constraints' in kwargs:
task_template_kwargs['placement'] = {
'Constraints': kwargs.pop('constraints')
}
if 'log_driver' in kwargs:
task_template_kwargs['log_driver'] = {
'Name': kwargs.pop('log_driver'),
'Options': kwargs.pop('log_driver_options', {})
}
# All kwargs should have been consumed by this point, so raise
# error if any are left
if kwargs:
raise create_unexpected_kwargs_error(func_name, kwargs)
container_spec = ContainerSpec(**container_spec_kwargs)
task_template_kwargs['container_spec'] = container_spec
create_kwargs['task_template'] = TaskTemplate(**task_template_kwargs)
return create_kwargs
|
[
"tflores"
] |
tflores
|
86d517542aae9fb3b1f8c499f26b5e9fd3831df4
|
48040e0159e4c8471923edc1e89260f1350f73d2
|
/forms/autoCreateShifts.py
|
4a38ba876a8012b6084bcab6d1ea15326551492c
|
[] |
no_license
|
flurl/lagerManager
|
496214fd841987882e8debbdccc5e81d76727f01
|
f7617b9dddc3ff175e44bb847583fbffb933c716
|
refs/heads/master
| 2020-05-21T15:19:07.969152
| 2019-06-17T15:40:16
| 2019-06-17T15:40:16
| 5,766,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,509
|
py
|
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui, QtSql
from CONSTANTS import *
import DBConnection
import config
from lib.GlobalConfig import globalConf
from forms.formBase import FormBase
from ui.forms.autoCreateShiftsForm_gui import Ui_AutoCreateShiftsForm
import datetime
class AutoCreateShiftsForm(FormBase):
uiClass = Ui_AutoCreateShiftsForm
ident = 'autoCreateShifts'
def __init__(self, parent, *args, **kwargs):
super(AutoCreateShiftsForm, self).__init__(parent, *args, **kwargs)
self.parent = parent
def setupUi(self):
super(AutoCreateShiftsForm, self).setupUi()
self.ui.dateTimeEdit_shift.setDate(QtCore.QDate.currentDate())
try:
lastImportURL = config.config['connection'][DBConnection.connName]['last_shift_import_url']
except KeyError:
lastImportURL = ''
self.ui.lineEdit_URL.setText(lastImportURL)
try:
lastImportedShiftDateTime = globalConf['last_imported_shift_datetime']
except KeyError:
lastImportedShiftDateTime = QtCore.QDateTime.currentDateTime().toTime_t()
dt = QtCore.QDateTime()
dt.setTime_t(lastImportedShiftDateTime)
self.ui.dateTimeEdit_ignoreBefore.setDateTime(dt)
def setupSignals(self):
super(AutoCreateShiftsForm, self).setupSignals()
self.connect(self.ui.pushButton_createShifts, QtCore.SIGNAL('clicked()'), self.createShifts)
def createShifts(self):
idx = self.ui.tabWidget.currentIndex()
if idx == 0:
self.createRecurringShifts()
elif idx == 1:
self.importShifts()
def createRecurringShifts(self):
recurrence = self.ui.comboBox_recurrence.currentIndex()
count = self.ui.spinBox_recurrenceCount.value()
name = self.ui.lineEdit_shiftName.text()
beginDate = self.ui.dateTimeEdit_shift.date()
beginTime = self.ui.dateTimeEdit_shift.time()
self.beginTransaction()
for i in range(count):
date = beginDate
if recurrence == 0:
date = date.addDays(i*1)
elif recurrence == 1:
date = date.addDays(i*7)
elif recurrence == 2:
date = date.addMonths(i*1)
elif recurrence == 3:
date = date.addYears(i*1)
            if not self.createShift(name, date, beginTime):
                self.rollback()
                QtGui.QMessageBox.critical(self, u'Schichterstellung fehlgeschlagen',
                    u'Die Schichten konnten nicht erstellt werden!\nBitte kontaktieren Sie Ihren Datenbank Administrator')
                return False  # fixed: previously the loop kept running and the failed batch was committed below
        self.commit()
QtGui.QMessageBox.information(self, u'Schichterstellung erfolgreich',
u'Die Schichten wurden erfolgreich erstellt')
return True
def importShifts(self):
from lib.feedparser import feedparser
url = unicode(self.ui.lineEdit_URL.text())
feed = feedparser.parse(url)
self.beginTransaction()
maxDateTime = ignoreBefore = self.ui.dateTimeEdit_ignoreBefore.dateTime()
for item in feed['items']:
name = item['title']
dateTime = item['published'][:-6] #strip the last 6 chars for easier parsing below
locale = QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates)
dateTime = locale.toDateTime(dateTime, 'ddd, dd MMM yyyy hh:mm:ss') # Sat, 29 Dec 2018 22:00:00
if dateTime <= ignoreBefore:
continue
beginDate = dateTime.date()
beginTime = dateTime.time()
if dateTime > maxDateTime:
maxDateTime = dateTime
            if not self.createShift(name, beginDate, beginTime):
                self.rollback()
                QtGui.QMessageBox.critical(self, u'Schichterstellung fehlgeschlagen',
                    u'Die Schichten konnten nicht erstellt werden!\nBitte kontaktieren Sie Ihren Datenbank Administrator')
                return False  # fixed: same missing abort as in createRecurringShifts
        self.commit()
QtGui.QMessageBox.information(self, u'Schichterstellung erfolgreich',
            u'Die Schichten wurden erfolgreich importiert')
try:
tmp = config.config['connection']
except KeyError:
config.config['connection'] = {DBConnection.connName: {}}
config.config['connection'][DBConnection.connName]['last_shift_import_url'] = url
config.config.write()
globalConf.setValueI('last_imported_shift_datetime', maxDateTime.toTime_t())
return True
def createShift(self, name, date, beginTime):
query = QtSql.QSqlQuery()
query.prepare('insert into veranstaltungen (ver_datum, ver_bezeichnung, ver_beginn) values (?, ?, ?)')
query.addBindValue(date)
query.addBindValue(name)
query.addBindValue(beginTime)
query.exec_()
if query.lastError().isValid():
print "Error while creating shift!", query.lastError().text()
return False
#update the parent dialog
self.parent.model.select()
return True
|
[
"it@postgarage.at"
] |
it@postgarage.at
|
5f1f8f1639525065d366c6762675a3cc642d30c2
|
84bb72a8618116c6e61353b73b744a5271bc99aa
|
/scripts/cloud/ppo_gcp.py
|
50607139b0024b559f136bac0d5f06aeebdfe3e7
|
[] |
no_license
|
ndalton12/AMPED
|
46916b109e19e88831150bde6ee8a5358bc91323
|
d93d5379ab91ace92d9c2f112ea5a9d1881fd0cb
|
refs/heads/master
| 2023-05-26T18:27:52.487607
| 2020-12-16T01:35:43
| 2020-12-16T01:35:43
| 310,496,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 873
|
py
|
import ray
from ray import tune
from ray.rllib.agents.ppo import PPOTrainer
from src.common.env_wrappers import register_super_mario_env
def train():
register_super_mario_env()
ray.init(address="auto")
tune.run(
PPOTrainer,
config={
"env": "super_mario",
"framework": "torch",
"num_workers": 4,
"log_level": "INFO",
"seed": 1337,
"num_envs_per_worker": 5,
"entropy_coeff": 0.01,
"kl_coeff": 0.0,
"num_sgd_iter": 2,
"num_gpus": 1,
"vf_share_layers": False,
},
sync_config=tune.SyncConfig(upload_dir="gs://amp-results"),
stop={"training_iteration": 500},
checkpoint_freq=500,
checkpoint_at_end=True,
#resume=True,
)
if __name__ == "__main__":
train()
|
[
"niall.dalton12@gmail.com"
] |
niall.dalton12@gmail.com
|
c73146252b7243a3e4200c4ac7e3b724ebbf5db5
|
8d79780c26453efd8d0a8ed5b1bea3ad30d54fa2
|
/signs/commands/stopwords.py
|
5c11110480e379df3c92bf4e39bbc89c8cb0c4b8
|
[
"MIT"
] |
permissive
|
autonomio/signs
|
90d217bb873e8417b5c55c6e53fe7551cb18833d
|
0ae345464d21d144e43f1e342ceb07783fe70a4f
|
refs/heads/master
| 2023-08-03T10:57:52.638763
| 2023-07-29T11:17:00
| 2023-07-29T11:17:00
| 138,289,276
| 13
| 3
|
MIT
| 2023-07-29T11:17:02
| 2018-06-22T10:23:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,570
|
py
|
class Stopwords:
def __init__(self,
docs,
common_stopwords=True,
add_stopwords=[],
min_length=2,
max_threshold=10):
'''Accepts as input a list-of-lists where
each sublist is a document represented in tokens.
docs : list (of lists)
Tokenized documents.
common_stopwords : bool
If a comprehensive list of stopwords should be used. If set
to False then add_stopwords can't be empty.
add_stopwords : list or None
If a list of words is provided, then those will be used
as well as common_stopwords unless it's set to False.
    min_length : int
Drop all words below this length.
max_threshold : int
Keep all words that are at least this long.
'''
import string
self.common_stopwords = common_stopwords
self.add_stopwords = add_stopwords
self.min_length = min_length
self.max_threshold = max_threshold
self.string = string
self.stopwords = self.stopword_index()
self.docs = docs
for i in range(len(docs)):
self.docs[i] = self.check_stopwords(docs[i])
def stopword_index(self):
import numpy as np
out = []
if self.common_stopwords is True:
from signs.utils.stopwords import stopwords
stopword_list = np.unique(stopwords()).tolist()
stopword_list += self.add_stopwords
else:
stopword_list = self.add_stopwords
for word in stopword_list:
if len(word) > 1:
out.append(word.lower())
stopword_dict = {}
for word in out:
if word[0] in self.string.ascii_letters:
try:
stopword_dict[word[0]].append(word)
except KeyError:
stopword_dict[word[0]] = [word]
return stopword_dict
def check_stopwords(self, doc):
out = []
for word in doc:
# always keep words longer than 10 characters
if len(word) >= self.max_threshold:
out.append(word)
# always pass words shorter than 2 characters
elif len(word) <= self.min_length:
continue
elif word[0] not in self.stopwords.keys():
out.append(word)
elif word not in self.stopwords[word[0]]:
out.append(word)
return out
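# A minimal usage sketch (added; not part of the original module). The tokens are
# hypothetical, and common_stopwords=False keeps the example independent of the
# signs.utils stopword list.
if __name__ == '__main__':
    docs = [['the', 'quick', 'brown', 'fox'], ['a', 'slow', 'red', 'fox']]
    sw = Stopwords(docs, common_stopwords=False, add_stopwords=['the', 'a'])
    print(sw.docs)  # [['quick', 'brown', 'fox'], ['slow', 'red', 'fox']]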
|
[
"mailme@mikkokotila.com"
] |
mailme@mikkokotila.com
|
5c6512d6f30eb845155ab18b6ca2551e3db2692e
|
80cab747b3a1ed9639ba45b86d1c6871c4d23fd0
|
/fb_mybot/apps.py
|
0bf4eb4f779c4b7d9328d96a6bd738725c1c5c1b
|
[] |
no_license
|
HadesSama/fuujisamytestbot
|
efd43440661e3e83ee26ea8af80a7260a60a3b50
|
4aaeda1c8e2195914b33a04de8435bb94991f715
|
refs/heads/master
| 2021-01-20T10:28:28.965635
| 2017-03-04T23:47:56
| 2017-03-04T23:47:56
| 83,932,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
from django.apps import AppConfig
class FbMybotConfig(AppConfig):
name = 'fb_mybot'
|
[
"samybensbih@gmail.com"
] |
samybensbih@gmail.com
|
c403caa88f7b02079838aa6f7f4193a522735a2a
|
8e41c877244f04c79cb0828e582f23fc19fce31c
|
/david/modules/works/model/work.py
|
eac66b28f1eccb686aed670193c1ec97f8e21f71
|
[
"MIT"
] |
permissive
|
ktmud/david
|
8214e0dd3c6b74f696e4d9a038eadc2e8e22223b
|
4b8d6f804b73cdfa1a8ddf784077fa9a39f1e36f
|
refs/heads/master
| 2016-08-05T22:56:42.548100
| 2014-01-19T11:57:18
| 2014-01-19T12:05:18
| 13,518,612
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
# -*- coding: utf-8 -*-
from config import SITE_ROOT
from david.core.db import db, orm, func, CatLimitedQuery, UidMixin
from david.core.accounts import User
from david.core.attachment import MediaMixin
from david.lib.utils import truncate, striptags
from david.ext.babel import lazy_gettext as _
from david.modules.artist.model import Artist
K_WORK = 210
class Work(db.Model, UidMixin, MediaMixin):
kind = K_WORK
kind_name = 'work'
id = db.Column(db.Integer, primary_key=True)
cat = db.Column('cat', db.SmallInteger, index=True, nullable=False)
title = db.Column(db.String(255), nullable=False)
owner_id = db.Column(db.Integer, nullable=False)
artist_id = db.Column(db.Integer, nullable=False)
desc = db.Column(db.Text())
pubdate = db.Column(db.Date)
create_at = db.Column(db.DateTime, default=func.now())
update_at = db.Column(db.DateTime, default=func.now(), onupdate=func.utc_timestamp())
deleted = db.Column(db.Boolean, default=False)
query_class= CatLimitedQuery
@property
def _DEFAULT_PIC(self):
return self.extended_self._DEFAULT_PIC
@property
def cat_id(self):
return self.cat
cat_name = 'work'
catname = property(lambda x: _(x.cat_name))
@property
def artist(self):
return Artist.get(self.artist_id)
def url(self):
return SITE_ROOT + 'work/' + self.slug
|
[
"jyyjcc@gmail.com"
] |
jyyjcc@gmail.com
|
0278c67c002e9408e6651d0eb061d1606c8955f0
|
782ea192b3d7bb6392488c841ec60392a7dd5569
|
/home/models.py
|
bba6bb452fe7dd171fca3d8843eacceb472fd124
|
[] |
no_license
|
GM1957/mysite
|
9526658800b2deefedb1da07d5e152acfe9a618e
|
ee2dcdd8b9c6c3243c7b615ae0f72c55ec745cb1
|
refs/heads/master
| 2022-12-22T08:40:21.846110
| 2020-10-03T17:17:54
| 2020-10-03T17:17:54
| 300,935,268
| 0
| 0
| null | 2020-10-03T17:56:46
| 2020-10-03T17:15:53
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,606
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.utils.timezone import now
# Create your models here.
class Product(models.Model):
productUniquename = models.CharField(max_length=200)
product_id = models.AutoField(primary_key=True)
product_name = models.CharField(max_length=200,unique = True)
description = models.CharField(max_length=1000)
pub_date = models.DateField(null = True,blank=True,default=now)
image = models.ImageField(upload_to="home/images")
#slug = models.CharField(max_length=200)
def __str__(self):
return self.product_name
class Contact(models.Model):
contactsno = models.AutoField(primary_key = True)
name = models.CharField(max_length=300)
phonenumber = models.CharField(max_length=13)
email = models.CharField(max_length=100)
content = models.TextField()
timeStamp = models.DateTimeField(auto_now_add=True, blank=True)
def __str__(self):
return self.name
class userdata(models.Model):
user = models.OneToOneField(User,on_delete=models.CASCADE,primary_key=True)
profile_pic = models.ImageField(upload_to="home/images/userprofileimages",default='home/images/userprofileimages/defaultuser.png')
def __str__(self):
return str(self.user)
# class userinfo(models.Model):
# user = models.OneToOneField(User,null=True,on_delete=models.CASCADE)
# profile_pic = models.ImageField(upload_to="home/images/userprofileimages",blank=True,default='home/images/userprofileimages/defaultuser.png')
# def __str__(self):
# return str(self.user)
|
[
"Tanmoybhowmik03@gmail.com"
] |
Tanmoybhowmik03@gmail.com
|
23db1f0ed3b70bb1a06804fbf9ed2880e100f1f4
|
c2d533b32c258280293be3f3bf20afe7fd77f1ac
|
/home/debian/brewing/log.py
|
bae24f68a12a10a641c07a4bb0a83aa3fc8f7447
|
[] |
no_license
|
leograba/final_paper_tcc
|
26bc1621272cbb587a750a7eb994b5e2b80428da
|
9743055eea2e6245d68295da7897e6e7a43d1b34
|
refs/heads/master
| 2021-01-18T16:26:42.147756
| 2016-05-29T19:51:07
| 2016-05-29T19:51:07
| 50,020,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25
|
py
|
import temp
temp.tlog()
|
[
"leogveiga@gmail.com"
] |
leogveiga@gmail.com
|
0b7d0093a662db29643feb27131176aa436c1bb7
|
29aa0266f6cb5578730cc44e6f792a7eeb6b01a5
|
/src/big_o_notation.py
|
1f93e368b1dcd5f8d3e3a83fd41aa3210bdf92c2
|
[] |
no_license
|
Kandelonius/python-lectures-LS
|
7ad0e1690dd5e474dbb6f9595fa488c1b58ef2aa
|
1d81a4e52238ae06894050168bb713760e47a804
|
refs/heads/master
| 2022-12-16T14:02:33.638479
| 2020-09-23T22:29:31
| 2020-09-23T22:29:31
| 286,515,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
my_list = [1, 2, 3, 4, 5]
longer_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def print_list(arr): # O(n) linear
for n in arr:
print(n)
print_list(my_list)
def print_pairs(items): # O(n^2) quadratic
for item_one in items:
for item_two in items:
print(item_one, item_two)
def my_func(arr):
a = 1
b = 2
c = a * b
for x in range(1000):
print(x)
for thing in arr:
x = 10
y = 20
z = x * y
print(thing)
my_func(my_list)
my_func(longer_list)
# "on the order of"
def two_loops(arr): # still O(n) linear
for x in range(1000000000):
z = x * x
print(z)
for thing in arr:
print(thing)
for thing_again in arr:
print(thing_again)
# def simple_recurse(n):
# if n <= 1:
# return n
# simple_recurse(n)
#
# def weird_recurse(num):
# if n <= 1:
# return num
# simple_recurse(num - 1)
# simple_recurse(num - 1)
# simple_recurse(num - 1)
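# O(2^n) exponential: each call fans out into two recursive calls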
def two_n_demo(n):
if n == 0:
return 1
a = two_n_demo(n - 1)
b = two_n_demo(n - 1)
return a + b
two_n_demo(3)
|
[
"shane.kaestner@yahoo.com"
] |
shane.kaestner@yahoo.com
|
8ad2d0a9ef495db9e47b6c1c13825bd57cc2fef6
|
4c00d6993f7d934fd57314df275e37ce3d231c39
|
/defrag.py
|
66ad04e6883ba9651f624597344fb2d9349cca59
|
[] |
no_license
|
cbhl/cusec12
|
56ce830d70ba9deee92fba745260bcca1a0bd1c3
|
0967a471833fc8f2d55fd8464b96ddadb195ad67
|
refs/heads/master
| 2016-09-06T04:10:12.421705
| 2012-01-21T04:55:42
| 2012-01-21T04:55:42
| 3,220,147
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,927
|
py
|
#!/usr/bin/env python
'''
Defrag
CUSEC Coding Competition
Waterloo Software Engineering
@author Michael Chang, Alice Yuan, Joshua Kaplin, Aaron Morais
'''
import defrag_scorer
from strategy import *
class TimeoutException(Exception):
pass
if __name__ == '__main__':
import argparse
import signal
def timeout_handler(signum, frame):
raise TimeoutException()
parser = argparse.ArgumentParser(
description='defragments a fictional file system'
)
parser.add_argument(
'--input',
required=False,
help='Input file for the disk (if not specified, will read from disk.txt)'
)
args = parser.parse_args()
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(295) # five minutes, minus a five second buffer
# signal.alarm(30)
try:
inputFilename = 'disk.txt' if args.input == None else args.input
strategies = []
#strategies.append(BaseStrategy.BaseStrategy())
strategies.append(MoveToFrontStrategy.MoveToFrontStrategy())
strategies.append(MoveToEndStrategy.MoveToEndStrategy())
strategies.append(QuickSortBaseStrategy.QuickSortBaseStrategy())
# strategies.append(SleepStrategy.SleepStrategy())
bestResult = []
disk = defrag_scorer.loadDisk(inputFilename)
bestScore = defrag_scorer.calculateScore(disk)
for strategy in strategies:
disk = defrag_scorer.loadDisk(inputFilename)
final_disk = strategy.calculate(disk)
result = strategy.result()
score = defrag_scorer.calculateScore(final_disk)
if score > bestScore or len(result) < len(bestResult):
bestResult = result
bestScore = score
except TimeoutException:
        pass  # fall through to returning the result
# output
for line in bestResult:
print line
|
[
"m9chang@uwaterloo.ca"
] |
m9chang@uwaterloo.ca
|
84ffc63b90733910ea156b66f03e2393e38acd03
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/9CWPv99o4EjZgHnkq_20.py
|
51d78dfb8d43ec2f17ffec1bef69618a23bddc68
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
"""
Write a function that divides a list into chunks such that the sum of each
chunk is `<= n`. Start from the left side of the list and move to the right.
### Examples
divide([1, 2, 3, 4, 1, 0, 2, 2], 5)
➞ [[1, 2], [3], [4, 1, 0], [2, 2]]
divide([1, 0, 1, 1, -1, 0, 0], 1)
➞ [[1, 0], [1], [1, -1, 0, 0]]
divide([2, 1, 0, -1, 0, 0, 2, 1, 3], 3)
➞ [[2, 1, 0, -1, 0, 0], [2, 1], [3]]
### Notes
* The max of the list will always be smaller than or equal to `n`.
* Use the **greedy approach** when solving the problem (e.g. fit as many elements you can into a chunk as long as you satisfy the sum constraint).
"""
def divide(lst, n):
r, add = [], []
i = 0
for x in lst:
if i + x <= n:
add.append(x)
i += x
else:
r.append(add)
add = []
add.append(x)
i = x
r.append(add)
return r
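# Sanity checks (added): these replay the examples given in the docstring above.
assert divide([1, 2, 3, 4, 1, 0, 2, 2], 5) == [[1, 2], [3], [4, 1, 0], [2, 2]]
assert divide([1, 0, 1, 1, -1, 0, 0], 1) == [[1, 0], [1], [1, -1, 0, 0]]
assert divide([2, 1, 0, -1, 0, 0, 2, 1, 3], 3) == [[2, 1, 0, -1, 0, 0], [2, 1], [3]]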
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
2018a3d9a6c42fdf513d453e9e9e752d29105e14
|
e62513f731a7e0c0f93df82fad4a7c391ae79503
|
/examples/crypto_ticker.py
|
28c1b0cb756302b1f0204ca0b27fe9326392f9ba
|
[
"MIT"
] |
permissive
|
bjarnekvae/pymobitec-flipdot
|
173157c6954d014b87ad444405466a1157f52d11
|
b51545e8b1b792c2c4e970cf9ce8b207ffabe9c2
|
refs/heads/master
| 2021-06-25T11:27:28.342520
| 2021-01-09T00:54:09
| 2021-01-09T00:54:09
| 171,110,798
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,188
|
py
|
import serial
import time
import pymobitec_flipdot as flipdot
import cryptocompare
import RPi.GPIO as GPIO
crypto_currency = 'ADA'
fiat_currency = 'USD'
LED_flash_threshold = 0.005 # % price change per period
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
GPIO.output(18, GPIO.HIGH)
prev_price = 1.0
with serial.Serial('/dev/ttyS0', 4800, timeout=1) as ser:
while True:
price = cryptocompare.get_price(crypto_currency, curr=fiat_currency)
if price is not None:
            price = price[crypto_currency][fiat_currency] + 0.00001  # extra digits to fill out display
else:
time.sleep(60)
continue
if price > 1.0:
price_str = str(price)[:5]
else:
price_str = str(price)[1:6]
msg = flipdot.set_text(price_str, 1, 0, flipdot.Fonts.numbers_14px)
ser.write(msg)
if (price-prev_price)/prev_price > LED_flash_threshold:
for i in range(3):
GPIO.output(18, GPIO.LOW)
time.sleep(0.04)
GPIO.output(18, GPIO.HIGH)
time.sleep(0.06)
prev_price = price
time.sleep(60)
|
[
"bjarnekvae@gmail.com"
] |
bjarnekvae@gmail.com
|
d537973b9f91bc317cd461a50404c9e0a259fd8f
|
4f380f74e0b0992427e5aa5d7c0bfc7ce841c98f
|
/main.py
|
138fa0aeeb06cf83cd4f653c068ee9a201fd0a3b
|
[] |
no_license
|
MurrayC7/DeepISP
|
1c4512087ce3ea40e274dd340a51e9f9500488c4
|
303507174c240a9f5048a76fdaa6045069f9d395
|
refs/heads/master
| 2022-04-03T20:17:36.407947
| 2019-11-28T07:49:17
| 2019-11-28T07:49:17
| 215,030,415
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
import argparse
import os
import tensorflow as tf
tf.set_random_seed(819)
from .model import Unet
parser = argparse.ArgumentParser(description='deepisp')
parser.add_argument('--dataset_dir', dest='dataset_dir', default='fivek', help='path of the dataset')
# the options below are read in main() but were missing from the parser; the defaults are assumed
parser.add_argument('--phase', dest='phase', default='train', help='train or test')
parser.add_argument('--checkpoint_dir', dest='checkpoint_dir', default='./checkpoint', help='directory for saving models')
parser.add_argument('--result_dir', dest='result_dir', default='./results', help='directory for test results')
args = parser.parse_args()
def main():
if not os.path.exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
if not os.path.exists(args.result_dir):
os.makedirs(args.result_dir)
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
with tf.Session(config=tfconfig, graph=tf.get_default_graph()) as sess:
model = Unet(sess, args)
model.train(args) if args.phase == 'train' else model.test(args)
if __name__ == '__main__':
tf.app.run()
|
[
"330784960@qq.com"
] |
330784960@qq.com
|
6b2b48dfde1b45519659285e8341242c9d50a5b9
|
4fe0dae5fcac233b087326b907513b3fb390e87c
|
/qidian_login/qidian_login/spiders/bookshelf.py
|
789234414771125ffa7c77e946ca1cd94bd3de48
|
[] |
no_license
|
cjg1994/crawl_practice
|
9b91efb1e237b91c2714828f10a2fa7c4e5c452b
|
8fc62c03b30dcce6b52b152ae03358aa9159be9a
|
refs/heads/master
| 2022-11-19T14:56:51.947369
| 2020-07-13T13:18:56
| 2020-07-13T13:18:56
| 279,276,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,260
|
py
|
# -*- coding: utf-8 -*-
"""
通过browsercookie可以获取chrome浏览器存在本地的各个网站的cookie对象,< Cookie key=value domain>
构造一个cookie字典,通过请求发送实现自动登录
"""
import scrapy
import browsercookie #browsercookie库的使用
from qidian_login.items import QidianLoginItem
cookie='Cookie: _csrfToken=W3JqtsH9COGyqaCjsvl3TgJe8E6YbrpcA2CAWen3; newstatisticUUID=1571035550_1737766613; ywkey=ywzElAb4Jnjy; ywguid=851000542593; ywopenid=A47F8ADC20F3492F392A7F56C0DBF5DB; e1=%7B%22pid%22%3A%22qd_P_my_bookshelf%22%2C%22eid%22%3A%22qd_M194%22%2C%22l3%22%3A1%2C%22l2%22%3A2%2C%22l1%22%3A3%7D; e2=%7B%22pid%22%3A%22qd_P_my_bookshelf%22%2C%22eid%22%3A%22qd_M194%22%2C%22l3%22%3A1%2C%22l2%22%3A2%2C%22l1%22%3A3%7D'
class BookshelfSpider(scrapy.Spider):
name = 'bookshelf'
def __init__(self):
        cookiejar=browsercookie.chrome()  # the chrome() call returns a cookiejar.CookieJar instance
self.cookie_dict={}
        for cookie in cookiejar:  # a CookieJar is iterable; each item is a cookie object of the form <Cookie key=value domain>
            # with attributes cookie.name, cookie.value and cookie.domain
if cookie.domain==".qidian.com":
if cookie.name in ["_csrfToken",
"newstatisticUUID",
"ywkey",
"ywguid",
"ywopenid",
"e1",
"e2"] :
self.cookie_dict[cookie.name]=cookie.value
def start_requests(self):
url="https://my.qidian.com/bookcase?targetTab=tabTarget1" #个人书架的网址
yield scrapy.Request(url,cookies=self.cookie_dict)
def parse(self, response):
book_list=response.xpath('//table[@id="shelfTable"]/tbody/tr')
for book in book_list:
item=QidianLoginItem()
item["category"]=book.xpath('td[2]/span/b/a/text()').extract()[0]
item["title"]=book.xpath('td[2]/span/b/a/text()').extract()[1]
item["update"]=book.xpath('td[3]/text()').extract()[0]
item["author"]=book.xpath('td[4]/a/text()').extract()[0]
yield item
|
[
"532223911@qq.com"
] |
532223911@qq.com
|
148be16b7639ef836ce0d724e7b939cb91f4eb7d
|
e1a2ddd7e382a450f75f47a366f740e807bf6d99
|
/area.py
|
6d46b185c2688b712e294169f7740fbd61845300
|
[] |
no_license
|
Padmajaya123/simple-python-programs
|
ddb37b410800b1601c640d907e1a13deba8f40c7
|
28a40e0ce2764fb7ab33ad234c6de668ed99dc04
|
refs/heads/master
| 2022-09-29T04:16:10.854631
| 2020-06-02T04:11:24
| 2020-06-02T04:11:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
a=int(input("enter the length="))
b=int(input("enter the breadth="))
as1=0
sr=0
c=0.0
as1=a*a
sr=a*b
r=int(input("enter radius="))
c=3.14*r*r
print("Area of square=",as1)
print("Area of rectangle=",sr)
print("Area of circle=",c)
|
[
"saipadmarekha@gmail.com"
] |
saipadmarekha@gmail.com
|
a9982fa549dd8e35fbd52c6ff5c868688b657c02
|
a9eec5b1dbccb40f4bf4daad61bf4a1e669218b0
|
/api/poc-api/apps/api/v1/notifications/views.py
|
dcc6713ef86124cf761773813675d06e3f887ecf
|
[] |
no_license
|
Gaiachain-Ltd/Charcoal-Web-Panel
|
f515a92ee6660e3bf53026465449227aa5255bee
|
18ab01b3bb0906e4ebf9fd1864a1e37562ce53cf
|
refs/heads/master
| 2023-01-20T09:16:48.327582
| 2020-11-25T11:58:09
| 2020-11-25T11:58:09
| 315,596,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
from rest_framework.viewsets import ReadOnlyModelViewSet
from apps.notifications.models import Notification
from config.swagger_schema import CustomSchema
from .serializers import NotificationSerializer
class NotificationViewSet(ReadOnlyModelViewSet):
serializer_class = NotificationSerializer
schema = CustomSchema()
def get_queryset(self):
user = self.request.user
queryset = Notification.objects.filter(users=user).exclude(read_by=user)
user.read_notifications.add(*queryset)
return queryset
|
[
"sbondar@milosolutions.com"
] |
sbondar@milosolutions.com
|
d3fc9110d178c1cc4d088cd664454800902db469
|
3c2cc8910c4a333a44d2d7b22489ef8d5ddb6a13
|
/src/zvt/domain/quotes/stock/stock_1mon_hfq_kdata.py
|
eef6e6ec4b3b171e2a4a4a850183a722c32f1e8f
|
[
"MIT"
] |
permissive
|
zvtvz/zvt
|
6341dc765177b1e99727207f1608b730cbbb705a
|
03aee869fd432bb933d59ba419401cfc11501392
|
refs/heads/master
| 2023-08-28T10:05:29.185590
| 2023-08-01T10:19:03
| 2023-08-01T10:19:03
| 179,451,497
| 2,782
| 922
|
MIT
| 2023-04-04T09:31:03
| 2019-04-04T08:06:57
|
Python
|
UTF-8
|
Python
| false
| false
| 560
|
py
|
# -*- coding: utf-8 -*-
# this file is generated by gen_kdata_schema function, dont't change it
from sqlalchemy.orm import declarative_base
from zvt.contract.register import register_schema
from zvt.domain.quotes import StockKdataCommon
KdataBase = declarative_base()
class Stock1monHfqKdata(KdataBase, StockKdataCommon):
__tablename__ = "stock_1mon_hfq_kdata"
register_schema(
providers=["joinquant", "em"], db_name="stock_1mon_hfq_kdata", schema_base=KdataBase, entity_type="stock"
)
# the __all__ is generated
__all__ = ["Stock1monHfqKdata"]
|
[
"5533061@qq.com"
] |
5533061@qq.com
|
7cf077b680285fd75d19fc7ffb8aaee66ef25fa3
|
53ac1a010ae901c0af1a73e46683f3d0f0ef973c
|
/hack/codeRS/mythread.py
|
0c7743231a5bc85f6421f80498dee923af87ba47
|
[] |
no_license
|
CO18344/Hack
|
c1ffbdf38d69a0e3a855ab813339ec022f195e20
|
e529e2c405a71776cd9e899c859e470e2692439c
|
refs/heads/main
| 2023-08-04T09:18:47.464166
| 2021-08-31T22:12:16
| 2021-08-31T22:12:16
| 401,844,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,858
|
py
|
import os
import threading
from django.contrib import messages
from django.shortcuts import redirect
from customauth.models import MyUser
from codeRS.models import Solved, Problem
class CodeProcessor(threading.Thread):
def __init__(self,request):
threading.Thread.__init__(self)
self.waiting = []
self.success = []
self.failed = []
self.request = request
    def run(self):
        request = self.request  # the original referenced a bare `request`, which is undefined inside run()
        if request.user.is_anonymous:
return redirect("/login/")
if request.method == 'GET' or request.method == 'POST':
try:
test_bubble = False
print('See noi: ',os.getcwd())
id_ = request.GET['id']
path_problem = os.getcwd()+os.sep+'problems'+os.sep+str(id_)
run_path = path_problem + os.sep + 'run.exe'
out_path = path_problem + os.sep + 'out.txt'
f = open(path_problem+os.sep+'starter.txt','r')
starter = f.read()
f.close()
if request.method == 'POST':
fname=''
test_bubble = True
code_content = request.POST.get('task','')
if Problem.objects.get(pk=int(id_)).language == 'cpp':
fname='submit.cpp'
else:
fname='submit.py'
f_code = open(path_problem + os.sep + fname,'w')
f_code.write(code_content)
f_code.close()
test_folder_path = path_problem + os.sep + 'TestCases'
testfile_path = path_problem + os.sep + 'TestCases' + os.sep + 'test.txt'
is_compile_sucess = False
if Problem.objects.get(pk = int(id_)).language=='cpp':
status = os.system('g++ ' + path_problem + os.sep + fname +' -o ' + run_path)
if status == 1:
messages.error(request,'Compilation error occurred')
else:
messages.success(request,'Compiled Successfully')
is_compile_sucess = True
print(run_path)
print(testfile_path)
print(out_path)
os.system(run_path + '< ' + testfile_path + ' >' + out_path)
elif Problem.objects.get(pk = int(id_)).language=='python':
status = os.system('python ' + path_problem + os.sep + fname + '< '+ testfile_path + ' >' + out_path)
if status == 1:
messages.error(request,'Compilation error occurred')
else:
is_compile_sucess = True
messages.success(request,'Compiled Successfully')
if is_compile_sucess:
f_ref = open(test_folder_path + os.sep + 'ref.txt','r')
ref_list = f_ref.read().splitlines()
f_ref.close()
f_out = open(out_path,'r')
output_list = f_out.read().splitlines()
f_out.close()
for i in range(len(ref_list)):
try:
ref = ref_list[i]
out = output_list[i]
if ref == out:
print(' Output ',i, 'passed')
else:
print('MISMATCH')
except IndexError:
print('Mismatch')
if ref_list == output_list:
User = MyUser.objects.get(pk=request.user.id)
User.score += Problem.objects.get(pk=int(id_)).score
User.save()
solved = Solved(uid = request.user, pid = Problem.objects.get(pk=int(id_)))
solved.save()
|
[
"satviksingh28@gmail.com"
] |
satviksingh28@gmail.com
|
ee7db765838bb6a38ab0336af7c697e96935fbd6
|
2d83f8a0d03fcc52edf6cdb0520648f0e60d35c0
|
/HCF.py
|
28eb30707f5b7dda02d566422a299ba24959a0f0
|
[] |
no_license
|
VishnuSahani/Python1
|
d490898baed893170bda909e9f599c584e09da7f
|
638cdddce100ebcb3eac6c807ef5c3de6e81fe7f
|
refs/heads/master
| 2020-06-04T18:33:22.450233
| 2019-07-17T06:08:44
| 2019-07-17T06:08:44
| 192,145,942
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
n = int(input("Enter the first number to find HCF of Number : "))
m = int(input("Enter the second number to find HCF of Number : "))
def find_hcf(x,y):
if x>y:
s = y
else:
s = x
for i in range(1 , s+1):
if x % i== 0 and y % i == 0:
hcf = i
return hcf
print("the H.C.F. of", n ,"amd", m,"is", find_hcf(n,m))
|
[
"vishnu83550@gmail.com"
] |
vishnu83550@gmail.com
|
f9da977baa73f80e23b13009c0d7c375fb78ca28
|
7ada2807aed5be68c5a45f9b8fe30f315136be27
|
/learn_motif.py
|
b2930e2a2eb2d2e59bd0af5aa5e58a6f500b963d
|
[] |
no_license
|
naterich2/oops_meme
|
1128618871ae03b2da03db3a374425b7479ad3b9
|
7f86d7b63f1628296d40d789e6bf381c764307b2
|
refs/heads/master
| 2021-01-05T07:53:03.533754
| 2020-02-13T00:10:36
| 2020-02-13T00:10:36
| 240,941,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,647
|
py
|
#!/usr/bin/env python
# Written by: Nathan Richman <nate@nrichman.dev>
# For BMI 776 (Advanced Bioinformatics) at UW-Madison
# Spring 2020
# Part of homework 1, this file contains classes and a main function to run the MEME algorithm
# with an OOPS model and exhaustive subsequence method as a starting point.
# Framework code with a main function and the Argparser was provided.
import argparse, os, sys
import numpy as np
import pandas as pd
import collections
import seq_logo
import matplotlib.pyplot as plt
# You can choose to write classes in other python files
# and import them here.
class MEME(object):
"""Class for the EM algorithm"""
def __init__(self, sequences, width, pseudocount):
"""TODO: to be defined.
Parameters
----------
sequences : array-like
Array of sequences
width: int
Width of sequence to look for
pseudocount: int
Pseudocount to use when running the M-step
"""
self._sequences = sequences
if width > (len(sequences[0]) + 1)/2:
raise ValueError("Width must be less than (len(sequence) + 1)/2")
self._width = width
self._pseudo = pseudocount
# Number of starting positions, L - W + 1
self._starting_pos = len(sequences[0]) - width + 1
# Probability matrix, first column is background, starting at position 1 is the first position on the motif, initialize to all 0's
# TODO check dimension on p, additionally check indexing on n_ck, p_ck p_ck and n_ck are 1-indexed since 0 is background, the rest is 0-indexed
self._P = pd.DataFrame(data=np.zeros((4,width+1)),index=['A','C','G','T'],columns=list(range(width + 1)))
# Matrix of starting position probabilities for each sequence, initialize to 0
self._Z = np.zeros((len(sequences),self._starting_pos))
@property
def p(self):
"""Getter for _p
Returns
--------
p_matrix
"""
return self._P
@property
def z(self):
"""Getter for _z
Returns
--------
z_matrix
"""
return self._Z
def _normalize(self,to_normalize):
"""_normalize: Helper function that normalizes a distribution in log-space so that all probabilities in the distribution sum to 1 in real space
Parameters
----------
to_normalize : 1D array
Distribution in log-space to normalize
Returns
-------
Normalized distribution in real space
"""
# https://stats.stackexchange.com/questions/6616/converting-normalizing-very-small-likelihood-values-to-probability
# For a precision of \eta = 10^-d for d digits of precision with n likelihoods
# 1. Subtract the maximum likelihood from all logs.
# 2. Throw away values less than the logarithm of \eta/n
#
# \eta = 10^-16 for 16 decimals
# ln(\eta / n) = ln(\eta) - ln(n)
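        #
        # Worked example (hypothetical numbers, added for clarity): logs = [-1000, -1001, -1100]
        # subtract the max: [0, -1, -100]; thresh = ln(1e-16) - ln(3) ~ -37.9
        # -100 < thresh, so that entry becomes probability 0; the rest are
        # exponentiated and normalized: [1, 0.368, 0] / 1.368 ~ [0.731, 0.269, 0.0]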
thresh = np.log(10**-16) - np.log(len(to_normalize))
my_max = np.max(to_normalize)
        # If the value is too small, treat it as probability 0 directly (the else
        # branch below never goes through exp, so float("-inf") would poison the sum)
        vals = [np.exp(a - my_max) if (a - my_max) > thresh else 0.0 for a in to_normalize]
        vals = vals/np.sum(vals)
return vals
def test(self,seq):
counts = collections.Counter(''.join(self._sequences))
p_matrix = pd.DataFrame(data=np.zeros_like(self._P),index=['A','C','G','T'],columns=list(range(self._width + 1)))
############ Populate p_c_k, k > 0
# Go through bases, and set probability of the found character at that position equal to 2/5, and the others to 1/5
counts_seq = collections.Counter(seq)
for i,base in enumerate(seq):
for letter in p_matrix.index:
if letter == base:
p_matrix.loc[base,i+1] = (self._pseudo + 1)/(self._pseudo*4 + 1)
else:
p_matrix.loc[letter,i+1] = (self._pseudo)/(self._pseudo*4 + 1)
############ Populate p_c_k, k = 0
n_0 = {}
for base in ['A','C','G','T']:
# n_c_0 = total # of c's in dataset - count of c's in motif
n_0[base] = (counts[base] - counts_seq[base])
my_sum = sum(n_0.values())
for i,base in enumerate(['A','C','G','T']):
# p_matrix_0 = (n_c_0 + pseudocount)/sum n_b_0 + 4*pseudocount
p_matrix.iloc[i,0] = (n_0[base] + self._pseudo)/(my_sum + self._pseudo*4)
#TODO: Find what to do with background for now set all to .25
# p_matrix.loc[:,0] = .25
log_likelihood, p_matrix, z_matrix = self._run_em(p_matrix)
self._P = p_matrix
self._Z = z_matrix
print("New starting position log-likelihood of: {:.3f}".format(log_likelihood))
print("Finished exhaustive subsequence, starting EM")
likelihood = self.run_em()
def exhaustive_subsequence(self,iterations=1):
"""Initialize the EM algzorithm by using the exhaustive-subsequence method: run EM for a specified number of iterations on each possible length W subsequence of the input.
Parameters
----------
iterations : int
Default: 1
Number of iterations to run the EM algorithm on each distinct subsequence
Returns
-------
p : array-like
Matrix of probabities of letter c in column k of the highest likelihood starting position
Additionally, sets self._p with this matrix
"""
# Find total number of characters in Dataset:
counts = collections.Counter(''.join(self._sequences))
        # Find unique set of length W subsequences so we don't have any redundancies
unique = set()
for sequence in self._sequences:
for start in range(self._starting_pos):
# Iterate through the starting positions and make a substring at the starting position and ending at that position plus the width
unique.add(sequence[start:(start+self._width)])
# Now that we have the set, we can iterate through all the unique sequences and set their p-matrix
# Keep track of best log-likelihood
best = float("-inf")
best_seq = ''
print("Total number of subsequences to evaluate: ",len(unique))
evaluated = 0
# my_j = 0
for seq in unique:
# if my_j < 500:
# print(my_j)
# my_j += 1
# continue
p_matrix = pd.DataFrame(data=np.zeros_like(self._P),index=['A','C','G','T'],columns=list(range(self._width + 1)))
############ Populate p_c_k, k > 0
# Go through bases, and set probability of the found character at that position equal to 2/5, and the others to 1/5
for i,base in enumerate(seq):
for letter in p_matrix.index:
if letter == base:
p_matrix.loc[base,i+1] = (self._pseudo + 1)/(self._pseudo*4 + 1)
else:
p_matrix.loc[letter,i+1] = (self._pseudo)/(self._pseudo*4 + 1)
############ Populate p_c_k, k = 0
n_0 = {}
# Number of A's C's G's and T's in the sequence.
counts_seq = collections.Counter(seq)
for base in ['A','C','G','T']:
# n_c_0 = total # of c's in dataset - count of c's in motif
n_0[base] = (counts[base] - counts_seq[base])
my_sum = sum(n_0.values())
for i,base in enumerate(['A','C','G','T']):
# p_matrix_0 = (n_c_0 + pseudocount)/sum n_b_0 + 4*pseudocount
p_matrix.iloc[i,0] = (n_0[base] + self._pseudo)/(my_sum + self._pseudo*4)
#TODO: Find what to do with background for now set all to .25
# p_matrix.loc[:,0] = .25
log_likelihood, p_matrix, z_matrix = self._run_em(p_matrix)
evaluated += 1
if log_likelihood > best:
self._P = p_matrix
self._Z = z_matrix
best = log_likelihood
best_seq = seq
print("{:s} log-likelihood of: {:.3f}, best log-likelihood of: {:.3f}, {:s}. {} left to evaluate".format(seq,log_likelihood,best,best_seq,len(unique) - evaluated))
print("Finished exhaustive subsequence, starting EM")
likelihood = self.run_em()
# easy test passed
def _calc_seq_prob(self,seq,z,p_matrix):
"""_calculate_seq_prob calculates the log probability of a sequence given a sequence (seq), the motif starting position (z), and a probability matrix (p_matrix)
Parameters
----------
seq : string
Sequence of whcih to calculate the probability
z : int
Starting position of motif in sequence
p_matrix : array-like
probability matrix of character c in row k of matrix, where the 0th row is the background
Raise
-------
        ValueError when z > L - W
Returns
-------
log-probability of the sequence
"""
if z > (len(seq) - (p_matrix.shape[1] - 1)):
raise ValueError("z must be less than L - W")
total = 0
for i,base in enumerate(seq):
# Background
if i < z or i > (z+(p_matrix.shape[1]-1)-1):
total += np.log(p_matrix.loc[base,0])
else:
# 0th index of the motif is at column 1 in the p_matrix so use i - z + 1
total += np.log(p_matrix.loc[base,(i - z + 1)])
return total
# easy test passed
def _calc_seq_prob_sum(self,seq,p_matrix):
"""Calculate the sum of sequence probabilities over all z \in {0,...,(L - W)}
Parameters
----------
seq : string
Sequence to calculate sum on
p_matrix : array-like
probability matrix for character c in column k of the motif, with background being in column 0
Returns
-------
tuple: (log-probability, [individual probabilities])
[0]: log-probability of the sum of sequence probabilities for all z \in {0,...,(L - W)}
[1]: list of the probability of the sequence at the individual starts
"""
# Initialize total at -inf corresponding to 0 probability
total = float("-inf")
individual = [0]*(len(seq) - (p_matrix.shape[1] - 1) + 1)
# Iterate through all starting positions: {0,...,(L - W)}
for z in range(len(seq) - (p_matrix.shape[1] - 1) + 1):
# log(x + y) = x' + log(1 + exp(y' - x')), where ' denotes log-space
# x' is supposed to be larger, i.e. closer to 0, and it starts at 0 so it should be larger
# Define the total as x and y as the new sequence to add on
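            # Numeric check of the identity (added): for x=3, y=2,
            # ln(3) + ln(1 + exp(ln(2) - ln(3))) = 1.0986 + ln(5/3) = 1.609 = ln(5)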
prob = self._calc_seq_prob(seq,z,p_matrix)
individual[z] = prob
#if z == 0:
total = prob + np.log(1 + np.exp(total - prob))
#else:
# total = total + np.log(1 + np.exp(prob - total))
return (total,individual)
def _calc_log_likelihood(self,p_matrix,z_matrix):
"""TODO: Docstring for _calc_log_likelihood.
Parameters
----------
p_matrix : TODO
z_matrix : TODO
Returns
-------
Log-likelihood of data and starting positions given current parameters
"""
# Normalize z matrix so we use the max percentage as the starting position.
# i.e. convert .1 .1 .1 .7 to 0 0 0 1
# for i,row in enumerate(z_matrix):
# max_index = np.argmax(row)
# z_matrix[i,:] = np.zeros_like(row)
# z_matriz[i,max_index] = 1
# total = 0
# n = len(self._sequences)
m = z_matrix.shape[1]
# # Enumerate all sequences
# for i, seq in enumerate(self._sequences):
# # Enumerate all starting positions
# for j, col in enumerate(z_matrix[i,:]):
# # Calculate sequence prob
# total += (z_matrix[i,j]*self._calc_seq_prob(seq,j,p_matrix) + n*np.log(1/m))
total = 0
for i,seq in enumerate(self._sequences):
total += self._calc_seq_prob_sum(seq,p_matrix)[0] - np.log(m)
return total
def run_em(self):
"""Run the EM algorithm until convergence
Returns
-------
float:
current log-likelihood
"""
current = float("-inf")
i = 1
print(self._P)
while(True):
log_likelihood,p_matrix,z_matrix = self._run_em(self._P)
i += 1
            if (current - log_likelihood > -.05) and (current - log_likelihood < 0):
                current = log_likelihood
                print("Log-likelihood improved by less than .05; EM converged. Log-likelihood: ", log_likelihood)
break
elif log_likelihood > current:
current = log_likelihood
self._P = p_matrix
self._Z = z_matrix
print("Finished iteration: ", i," \tLog-likelihood: ",current)
else:
print(self._P)
raise ValueError("log-likelihood not increasing, something is wrong...")
return current
def _run_em(self,p_matrix):
"""TODO: Docstring for _run_em.
Parameters
----------
p_matrix : array-like
Current matrix of probabilities for character c at position k of the motif, index starting at 1 since index 0 is background
0 1 2 .... W
A 0.0 0.0
C 0.0 0.0
G 0.0 0.0
T 0.0 0.0
Returns
-------
tuple of (log_likelihood, p_matrix, z_matrix)
"""
################### Initialization #############
# Use local versions of p_matrix and z_matrix n_matrix
_p = pd.DataFrame().reindex_like(self._P).fillna(0)
_z = np.zeros_like(self._Z)
_n = pd.DataFrame().reindex_like(self._P).fillna(0)
################### E-Step #####################
# Compute Z_{i,j}^t
seq_sums = [0]*len(self._sequences)
for i,sequence in enumerate(self._sequences):
sequence_sum,individuals = self._calc_seq_prob_sum(sequence,p_matrix)
seq_sums[i] = sequence_sum
# Set the values for the sequence by dividing the individual probability by the sum, using subtraction in log-space and normalize
_z[i,:] = self._normalize([(x - sequence_sum) for x in individuals])
################### M-Step #####################
##### populate n_matrix
for i, sequence in enumerate(self._sequences):
for j, base in enumerate(sequence):
######### The following only calculates n_c,k where k > 0
# Add to the sum across sequences of the sum over positions where c appears
# For each position we want k to correspond to the number of positions that each character could be at if it were part of a motif. i.e. the character at position 0 in the sequence can't be the second position in the motif.
if j < (self._width - 1):
k = j + 1
my_range = range(1,k+1)
elif j > (len(sequence) - self._width):
#k = width - (distance from the end)
k = self._width - ((len(sequence) - 1) - j)
my_range = range(k,self._width+1)
else:
my_range = range(1,self._width+1)
# my_range represents a range over all the positions k in a motif that the character could be in
for k in my_range:
#j needs to equal where the starting position would be
# if k = 1, starting pos = j
# if k = 2, starting pos = j - 1
#starting_pos = j - (k - 1)
starting_pos = j - (k - 1)
# _n.loc[base,k] += _z[j,starting_pos]
_n.loc[base,k] += _z[i,starting_pos]
###### k = 0
# Populate n_c,0 with n_C (total # of c's in data set)
_n.loc[base,0] += 1
# After this nested loop, have one loop to subtract the sume of n_c,k for all k
for base in range(4):
_n.iloc[base,0] -= np.sum(_n.iloc[base,1:])
##### calculate p_matrix from n_matrix
for base in ['A','C','G','T']:
for position in range(self._P.shape[1]):
_p.loc[base,position] = (_n.loc[base,position] + self._pseudo)/(np.sum(_n.loc[:,position]) + self._pseudo*4)
log_likelihood = self._calc_log_likelihood(_p,_z)
return (log_likelihood, _p, _z)
################ Testing
#sequences = []
#with open('example1.txt', 'r') as infile:
# for line in infile:
# sequences.append(line.replace('\n',''))
#test = MEME(sequences, 3, 1)
## Main
# This is the main function provided for you.
# Define additional functions to implement MEME
def main(args):
# Parse input arguments
# It is generally good practice to validate the input arguments, e.g.,
# verify that the input and output filenames are provided
if args.sub_name == 'meme':
seq_file_path = args.sequences_filename
W = args.width
model_file_path = args.model
position_file_path = args.positions
subseq_file_path = args.subseqs
sequences = []
with open(seq_file_path, 'r') as infile:
for line in infile:
sequences.append(line.replace('\n',''))
my_meme = MEME(sequences, W, 1)
        if args.start:
            if len(args.start) != args.width:
                raise ValueError('Start must be same length as width')
my_meme.test(args.start)
else:
my_meme.exhaustive_subsequence()
positions = []
subseqs = []
for i,seq in enumerate(my_meme.z):
positions.append(np.argmax(seq))
            subseqs.append(sequences[i][np.argmax(seq):(np.argmax(seq) + W)])  # the motif is W characters wide
with open(position_file_path,'w') as out_file:
for position in positions:
print(position, file=out_file)
with open(subseq_file_path,'w') as out_file:
for sequence in subseqs:
print(sequence, file=out_file)
with open(model_file_path, 'w') as out_file:
print(my_meme.p.to_csv(header=False,sep='\t'),file=out_file)
print(my_meme.p.to_csv(header=False,sep='\t',float_format="%.3f"))
if args.graph:
heights = seq_logo.seq_logo(my_meme.p)
plots = [0]*my_meme.p.shape[0]
for letter in range(my_meme.p.shape[0]):
plots[letter] = plt.bar(list(range(my_meme.p.shape[1])),
my_meme.p.iloc[letter,:],bottom=(np.sum(my_meme.p.iloc[0:letter,:]) if letter != 0 else 0))[0]
plt.legend(plots,('A','C','G','T'))
plt.ylim([0,2])
plt.ylabel('Bits')
plt.show()
elif args.sub_name == 'logo':
model_filename = args.model_filename
# Read file as csv with tab separator, treat the first column as the row names, and drop columns that are NaN
model = pd.read_csv(model_filename,header=None,sep='\t',index_col=0).dropna(axis=1)
heights = seq_logo.seq_logo(model)
plots = [0]*model.shape[0]
if args.graph:
for letter in range(model.shape[0]):
plots[letter] = plt.bar(list(range(model.shape[1])),
model.iloc[letter,:],bottom=(np.sum(model.iloc[0:letter,:]) if letter != 0 else 0))[0]
plt.legend(plots,('A','C','G','T'))
plt.ylim([0,2])
plt.ylabel('Bits')
plt.show()
else:
print(heights)
# Note: this syntax checks if the Python file is being run as the main program
# and will not execute if the module is imported into a different module
if __name__ == "__main__":
# Note: this example shows named command line arguments. See the argparse
# documentation for positional arguments and other examples.
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(title='subcommands',dest='sub_name')
parser_em = subparsers.add_parser('meme',description=__doc__,help='Run meme algorithm')
parser_logo = subparsers.add_parser('logo',description=__doc__,help='Compute sequence logo')
parser_logo.add_argument('model_filename',
help='model file path.',
type=str)
parser_logo.add_argument('--graph',
help='Graph model?',
action='store_true',
default=False)
parser_em.add_argument('sequences_filename',
help='sequences file path.',
type=str)
parser_em.add_argument('--width',
help='width of the motif.',
type=int,
default=6)
parser_em.add_argument('--graph',
help='Graph model?',
action='store_true',
default=False)
parser_em.add_argument('--start',
help='Motif possibility to start with, must be same length as --width.',
type=str,
default=None)
parser_em.add_argument('--model',
help='model output file path.',
type=str,
default='model.txt')
parser_em.add_argument('--positions',
help='position output file path.',
type=str,
default='positions.txt')
parser_em.add_argument('--subseqs',
help='subsequence output file path.',
type=str,
default='subseqs.txt')
args = parser.parse_args()
# Note: this simply calls the main function above, which we could have
# given any name
main(args)
|
[
"nate.rich2@gmail.com"
] |
nate.rich2@gmail.com
|
a8f4da140844cda00490682847b76c6fddaa7724
|
5bd690929978bf1fbc3ab5c37c5ea77dd77ceecf
|
/blog/models.py
|
c0dc21b63b63c199dcb1bd71be68c436a1f4dfa1
|
[] |
no_license
|
zeferino90/django_tutorial
|
72b4849778ab55789241ef258f34f3d31345930a
|
69cfcec73cce53a73adb0ce8cf0fa5012e8d7f72
|
refs/heads/master
| 2020-12-31T07:32:14.937142
| 2016-05-18T11:31:03
| 2016-05-18T11:31:03
| 59,109,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
from django.db import models
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey('auth.User')
title = models.CharField(max_length=200)
text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)  # pass the callable so it is evaluated on each save, not once at import time
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
|
[
"mperesp1990@gmail.com"
] |
mperesp1990@gmail.com
|
bcdec5e496ad6f5abee019462653d2a2374ba54f
|
56b62b19f83d33c53ea75407fb43f3ba7e47948a
|
/server/tests/conf.py
|
2b425850d2b3827889506cac766cea03fcb44151
|
[] |
no_license
|
irs1318dev/irsScouting2017
|
73fc9b7696cbb76e8db95e24583331b046a7025a
|
cf2f0f75495916a74f3246e5119fa945f281c279
|
refs/heads/master
| 2021-01-16T00:28:41.380053
| 2020-02-26T04:29:54
| 2020-02-26T04:29:54
| 81,687,929
| 5
| 2
| null | 2019-02-18T21:11:35
| 2017-02-11T22:50:46
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 406
|
py
|
"""Commonly used settings
"""
# Server settings
host = "localhost"
port = "5432"
# Production Database Settings
user = "irs1318"
pw = "irs1318"
db = "scouting"
def_event = ("turing", "2017")
# Root Settings
root_user = "postgres"
root_pw = "irs1318"
root_db = "postgres"
# Test Settings
test_user = "irs1318test"
test_pw = "irs1318test"
test_db = "scouting_test"
test_event = ("test_event", "2017")
|
[
"stacy.k.irwin@gmail.com"
] |
stacy.k.irwin@gmail.com
|
0050a190e919c80e77c4c53c167500cd7131620c
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/knew.py
|
4912c6babbcef8ee5f2db46376577b4a90541f7c
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,609
|
py
|
ii = [('BentJDO2.py', 2), ('CookGHP3.py', 44), ('MarrFDI.py', 6), ('CoolWHM2.py', 8), ('KembFFF.py', 7), ('GodwWSL2.py', 76), ('ChanWS.py', 1), ('SadlMLP.py', 7), ('FerrSDO3.py', 21), ('WilbRLW.py', 19), ('WilbRLW4.py', 20), ('RennJIT.py', 3), ('AubePRP2.py', 13), ('CookGHP.py', 65), ('MartHSI2.py', 20), ('LeakWTI2.py', 1), ('KembFJ1.py', 13), ('WilkJMC3.py', 1), ('WilbRLW5.py', 34), ('LeakWTI3.py', 5), ('PettTHE.py', 2), ('MarrFDI3.py', 7), ('TennAP.py', 12), ('PeckJNG.py', 2), ('KnowJMM.py', 21), ('BailJD2.py', 21), ('AubePRP.py', 17), ('AdamWEP.py', 4), ('FitzRNS3.py', 3), ('WilbRLW2.py', 37), ('ClarGE2.py', 40), ('WilkJMC2.py', 3), ('CarlTFR.py', 24), ('LyttELD.py', 23), ('CoopJBT2.py', 33), ('TalfTAC.py', 5), ('GrimSLE.py', 5), ('RoscTTI3.py', 19), ('AinsWRR3.py', 20), ('CookGHP2.py', 50), ('KiddJAE.py', 3), ('AdamHMM.py', 18), ('BailJD1.py', 15), ('RoscTTI2.py', 12), ('CoolWHM.py', 24), ('MarrFDI2.py', 11), ('CrokTPS.py', 7), ('ClarGE.py', 59), ('LandWPA.py', 6), ('BuckWGM.py', 1), ('IrviWVD.py', 28), ('LyelCPG.py', 4), ('GilmCRS.py', 36), ('DaltJMA.py', 6), ('DibdTRL2.py', 18), ('AinsWRR.py', 16), ('CrocDNL.py', 35), ('MedwTAI.py', 13), ('LandWPA2.py', 11), ('WadeJEB.py', 13), ('FerrSDO2.py', 31), ('TalfTIT.py', 12), ('NewmJLP.py', 7), ('GodwWLN.py', 15), ('CoopJBT.py', 13), ('KirbWPW2.py', 2), ('SoutRD2.py', 26), ('BackGNE.py', 10), ('LeakWTI4.py', 6), ('LeakWTI.py', 3), ('MedwTAI2.py', 17), ('BachARE.py', 1), ('SoutRD.py', 21), ('DickCSG.py', 2), ('WheeJPT.py', 44), ('MereHHB3.py', 11), ('HowiWRL2.py', 8), ('BailJD3.py', 15), ('MereHHB.py', 6), ('WilkJMC.py', 6), ('HogaGMM.py', 8), ('MartHRW.py', 36), ('MackCNH.py', 8), ('FitzRNS4.py', 26), ('CoolWHM3.py', 11), ('DequTKM.py', 30), ('FitzRNS.py', 15), ('BentJRP.py', 9), ('EdgeMHT.py', 60), ('BowrJMM.py', 27), ('LyttELD3.py', 12), ('HallFAC.py', 4), ('FerrSDO.py', 20), ('RoscTTI.py', 10), ('ThomGLG.py', 22), ('StorJCC.py', 7), ('KembFJ2.py', 13), ('LewiMJW.py', 19), ('BabbCRD.py', 17), ('MackCNH2.py', 17), ('BellCHM.py', 3), ('HaliTBC.py', 9), ('WilbRLW3.py', 39), ('AinsWRR2.py', 24), ('MereHHB2.py', 9), ('BrewDTO.py', 1), ('JacoWHI.py', 2), ('ClarGE3.py', 40), ('RogeSIP.py', 16), ('MartHRW2.py', 35), ('DibdTRL.py', 38), ('FitzRNS2.py', 35), ('HogaGMM2.py', 12), ('MartHSI.py', 31), ('EvarJSP.py', 14), ('DwigTHH.py', 9), ('NortSTC.py', 18), ('SadlMLP2.py', 1), ('BowrJMM2.py', 25), ('BowrJMM3.py', 8), ('BeckWRE.py', 8), ('TaylIF.py', 9), ('WordWYR.py', 7), ('DibdTBR.py', 4), ('ChalTPW.py', 1), ('ThomWEC.py', 6), ('KeigTSS.py', 21), ('WaylFEP.py', 2), ('BentJDO.py', 4), ('ClarGE4.py', 46), ('HowiWRL.py', 26)]
|
[
"prabhjyotsingh95@gmail.com"
] |
prabhjyotsingh95@gmail.com
|
cd7edd607511c4ceb2f03d8021a7dc766ee99b53
|
55fc1cf505abd993a5271e9c607b0b744345c71c
|
/web_flask/0-hello_route.py
|
4ebdee0bbcb0a956093e58c93720f2c9c156cd09
|
[] |
no_license
|
scurry222/AirBnB_clone_v2
|
e8469639920219a83ff94033e388aea9d4e87ec6
|
90683b32a48e580f7948ad062be837d523c7e86b
|
refs/heads/master
| 2020-07-03T17:42:09.458200
| 2019-09-05T01:10:58
| 2019-09-05T01:10:58
| 201,990,874
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
#!/usr/bin/python3
""" starts a web application """
from flask import Flask
app = Flask(__name__)
@app.route('/', strict_slashes=False)
def hello_flask():
""" returns intro string """
return "Hello HBNB!"
if __name__ == "__main__":
app.run()
|
[
"scurry222@gmail.com"
] |
scurry222@gmail.com
|
061edeb101c45ea71f83da9acd8e7f2de792d075
|
792b3fcf3c83c37e26dd5f6bfbe7716018c8c571
|
/Server/Routes/sockets.py
|
bde9ea55b819e9844a0d623739404df0533105c1
|
[] |
no_license
|
kevin-fang/PillUp
|
afcad8d5e9d21f6a9b0b764e480a9f58c480b76c
|
5b4b11fb9f9206aad47f4a316c41f817cddc9711
|
refs/heads/master
| 2023-01-06T20:12:57.328896
| 2021-08-10T22:16:17
| 2021-08-10T22:16:17
| 148,874,677
| 2
| 0
| null | 2023-01-04T12:51:03
| 2018-09-15T06:02:02
|
Python
|
UTF-8
|
Python
| false
| false
| 775
|
py
|
from Utilities.NotificationCenter import NotificationCenter as Notification
from main import socketio
import json
@Notification.notify_on('dispense')
def dispense(patient, medicine):
try:
data = patient.to_json()
data['medicine'] = medicine.to_json()
data = json.dumps(data)
socketio.emit('dispense', data, broadcast=True, namespace='/stream')
except:
pass
@Notification.notify_on('refill')
def refill(patient, medicine):  # was a second "dispense" definition, which shadowed the handler above
try:
data = patient.to_json()
data['medicine'] = medicine.to_json()
data = json.dumps(data)
socketio.emit('refill', data, broadcast=True, namespace='/stream')
except:
pass
@socketio.on('connect', namespace='/stream')
def handle_connect():
pass
|
[
"sh.taslim@gmail.com"
] |
sh.taslim@gmail.com
|
4b543224375ff819cf154581c26265bf3182c556
|
7a27a6d95f3cf2ec6952f40d6e8707e49cb52602
|
/litex/build/altera/platform.py
|
1d8898655046b168fc3bf5ba6ee11707753253c9
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
davidcorrigan714/litex
|
d2dd84f313e5ad545a59bb84c53174ab77165fc3
|
a6fd7b5d3714824bfcaaabdd752a8c5dc4bbbdff
|
refs/heads/master
| 2023-01-10T04:31:52.694859
| 2020-11-09T02:34:10
| 2020-11-09T02:34:10
| 284,874,101
| 0
| 1
|
NOASSERTION
| 2020-08-13T05:26:00
| 2020-08-04T04:09:36
|
C
|
UTF-8
|
Python
| false
| false
| 1,681
|
py
|
#
# This file is part of LiteX.
#
# Copyright (c) 2015-2019 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2019 msloniewski <marcin.sloniewski@gmail.com>
# SPDX-License-Identifier: BSD-2-Clause
import os
from litex.build.generic_platform import GenericPlatform
from litex.build.altera import common, quartus
# AlteraPlatform -----------------------------------------------------------------------------------
class AlteraPlatform(GenericPlatform):
bitstream_ext = ".sof"
create_rbf = True
def __init__(self, *args, toolchain="quartus", **kwargs):
GenericPlatform.__init__(self, *args, **kwargs)
self.ips = set()
if toolchain == "quartus":
self.toolchain = quartus.AlteraQuartusToolchain()
else:
raise ValueError("Unknown toolchain")
def add_ip(self, filename):
self.ips.add((os.path.abspath(filename)))
def get_verilog(self, *args, special_overrides=dict(), **kwargs):
so = dict(common.altera_special_overrides)
so.update(special_overrides)
return GenericPlatform.get_verilog(self, *args, special_overrides=so, **kwargs)
def build(self, *args, **kwargs):
return self.toolchain.build(self, *args, **kwargs)
def add_period_constraint(self, clk, period):
if clk is None: return
if hasattr(clk, "p"):
clk = clk.p
self.toolchain.add_period_constraint(self, clk, period)
def add_false_path_constraint(self, from_, to):
if hasattr(from_, "p"):
from_ = from_.p
if hasattr(to, "p"):
to = to.p
self.toolchain.add_false_path_constraint(self, from_, to)
|
[
"florent@enjoy-digital.fr"
] |
florent@enjoy-digital.fr
|
8c7ce204924a3462703bfebf3cb2f66d8dc93ceb
|
4d6a249d9389406e25849574d9b0fde9a978359c
|
/tests/callable/publisher_function/test_subscriber_staticmethod.py
|
633e112b3ac18cd6f48c855a7c400154b7cffd50
|
[
"Apache-2.0"
] |
permissive
|
basecue/cue
|
8eb80b12f381a4a54755bc46d14311182a57e34b
|
9d425c8e0fc306f5d48336a3f4ca66ed16d52b3c
|
refs/heads/main
| 2023-02-05T22:38:35.681848
| 2020-12-30T20:25:41
| 2020-12-30T20:25:41
| 291,537,586
| 0
| 0
| null | 2020-12-30T20:25:43
| 2020-08-30T19:27:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,080
|
py
|
from types import SimpleNamespace
import pytest
from cue import publisher, subscribe
@pytest.fixture
def setup():
@publisher
def event(text: str, flag: bool = True):
return text, flag
class Subscriber:
subscribers = SimpleNamespace(
on_event_staticmethod=[],
)
@subscribe(event)
@staticmethod
def on_event_staticmethod(text: str, flag: bool = True):
Subscriber.subscribers.on_event_staticmethod.append((text, flag))
return event, Subscriber
def test(setup):
event, Subscriber = setup
return_value = event('text', flag=False)
assert return_value == ("text", False)
assert Subscriber.subscribers.on_event_staticmethod == [
('text', False)
]
def test_subscriber_instance(setup):
event, Subscriber = setup
subscriber = Subscriber()
subscriber_2 = Subscriber()
return_value = event('text', flag=False)
assert return_value == ("text", False)
assert Subscriber.subscribers.on_event_staticmethod == [
('text', False)
]
|
[
"noreply@github.com"
] |
basecue.noreply@github.com
|
c074f5c57031ec03570f3131d545e1442de6317b
|
d66100c02073f051ceff3edf0845eb702cb6ed5f
|
/qida/models.py
|
23928af25ef7083debc0670d934bbd8a418eeff2
|
[] |
no_license
|
liubq919/VisualizeLogStatistics
|
1c22ec12c3fc0decaa47518b1cc4ade0ede24a47
|
5b2499e47474a8c89409643e2aab961bba5d5204
|
refs/heads/master
| 2021-01-10T08:44:49.284076
| 2016-03-09T02:58:56
| 2016-03-09T02:58:56
| 53,462,933
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,035
|
py
|
# -*- coding: utf-8 -*-
from mongoengine import *
__author__ = 'liubq'
class EveryFifteenMinutes(EmbeddedDocument):
hour = IntField(required=True)
counter1 = IntField(required=True, default=0)
counter2 = IntField(required=True, default=0)
counter3 = IntField(required=True, default=0)
counter4 = IntField(required=True, default=0)
class QidaEveryFifMinLoginCount(Document):
date = StringField(required=True)
info = ListField(EmbeddedDocumentField(EveryFifteenMinutes))
class QidaOSInfo(Document):
date = StringField(required=True)
# desktop os
windowsxp = IntField(required=True, default=0)
windowsvista = IntField(required=True, default=0)
windows7 = IntField(required=True, default=0)
windows8 = IntField(required=True, default=0)
windows8_1 = IntField(required=True, default=0)
windows10 = IntField(required=True, default=0)
windowsOthers = IntField(required=True, default=0)
windows = IntField(required=True, default=0)
osx = IntField(required=True, default=0)
# mobile/tablet os
iPhone = IntField(required=True, default=0)
iPad = IntField(required=True, default=0)
android = IntField(required=True, default=0)
# other os
osOthers = IntField(required=True, default=0)
meta = {
'collection': 'qida_os_info'
}
class QidaBrowserInfo(Document):
date = StringField(required=True)
ie6 = IntField(required=True, default=0)
ie7 = IntField(required=True, default=0)
ie8 = IntField(required=True, default=0)
ie9 = IntField(required=True, default=0)
ie10 = IntField(required=True, default=0)
ie11 = IntField(required=True, default=0)
edge = IntField(required=True, default=0)
firefox = IntField(required=True, default=0)
chrome = IntField(required=True, default=0)
safari = IntField(required=True, default=0)
others = IntField(required=True, default=0)
class QidaLoginCount(Document):
date = StringField(required=True)
times = IntField(required=True, default=0)
|
[
"liubq919@163.com"
] |
liubq919@163.com
|
786ab8024912c38318e08fc8049d53e37a5b1fae
|
050fc5ca698dfd7612dee42aa980fc7b5eee40a2
|
/skywalking/plugins/sw_aiormq.py
|
4b32c562651bb98bba2d06ce8a20f795c85f3101
|
[
"Apache-2.0"
] |
permissive
|
apache/skywalking-python
|
8ac6ce06630c519f9984a45e74c1fcc88cf5b9d6
|
1a360228c63cd246dd4c5dd8e1f09bdd5556ad7d
|
refs/heads/master
| 2023-09-05T02:45:56.225937
| 2023-08-28T22:19:24
| 2023-08-28T22:19:24
| 261,456,329
| 178
| 122
|
Apache-2.0
| 2023-08-28T22:19:26
| 2020-05-05T12:13:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,888
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from skywalking import Layer, Component
from skywalking.trace.carrier import Carrier
from skywalking.trace.context import get_context
from skywalking.trace.tags import TagMqBroker, TagMqTopic, TagMqQueue
link_vector = ['https://pypi.org/project/aiormq/']
support_matrix = {
'aiormq': {
'>=3.7': ['6.3', '6.4']
}
}
note = """"""
def install():
from aiormq import Channel
from aiormq.tools import awaitable
try:
from pamqp import commands as spec # aiormq v6.4.1
except ImportError:
from pamqp import specification as spec # aiormq v3.3.1
async def _sw_basic_publish(self, body, exchange='', routing_key='', properties=None, **kwargs):
url = self.connection.url
peer = f'{url.host}:{url.port}' if url.port else url.host
context = get_context()
with context.new_exit_span(op=f'RabbitMQ/Topic/{exchange}/Queue/{routing_key}/Producer',
peer=peer, component=Component.RabbitmqProducer) as span:
span.tag(TagMqBroker(peer))
span.tag(TagMqTopic(exchange))
span.tag(TagMqQueue(routing_key))
span.layer = Layer.MQ
carrier = span.inject()
if properties is None:
properties = spec.Basic.Properties(delivery_mode=1)
headers = getattr(properties, 'headers', None)
if headers is None:
headers = properties.headers = {}
for item in carrier:
headers[item.key] = item.val
return await _basic_publish(self, body, exchange=exchange, routing_key=routing_key, properties=properties, **kwargs)
async def _sw_basic_consume(self, queue, consumer_callback, *args, **kwargs):
async def _callback(msg):
context = get_context()
url = self.connection.url
peer = f'{url.host}:{url.port}' if url.port else url.host
exchange = msg.delivery.exchange
routing_key = msg.delivery.routing_key
headers = msg.header.properties.headers
carrier = Carrier()
for item in carrier:
if item.key in headers:
val = headers.get(item.key)
if val is not None:
item.val = val if isinstance(val, str) else val.decode()
with context.new_entry_span(op='RabbitMQ/Topic/' + exchange + '/Queue/' + routing_key
+ '/Consumer' or '', carrier=carrier) as span:
span.layer = Layer.MQ
span.component = Component.RabbitmqConsumer
span.tag(TagMqBroker(peer))
span.tag(TagMqTopic(exchange))
span.tag(TagMqQueue(routing_key))
return await awaitable(consumer_callback)(msg)
return await _basic_consume(self, queue, _callback, *args, **kwargs)
_basic_publish = Channel.basic_publish
_basic_consume = Channel.basic_consume
Channel.basic_publish = _sw_basic_publish
Channel.basic_consume = _sw_basic_consume
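    # Note (added): the two assignments above capture the library's original
    # methods before replacing them with the tracing wrappers, the usual
    # monkey-patching pattern these SkyWalking plugins rely on.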
|
[
"noreply@github.com"
] |
apache.noreply@github.com
|
75b37980676a19f8650268bda87ff9eb82f75b43
|
5d9038f6e4ef1082a264bbb10c2421245240b21a
|
/test/test2.py
|
ecddee7c85e65d7ac8d4078e47d12e0a9854afa3
|
[] |
no_license
|
changyubiao/myalgorithm-demo
|
ee9c8f559d61604ec8f4805785adf1a569d031a4
|
88c374d9062d17266abf75b155e42859c5e3d078
|
refs/heads/master
| 2022-12-08T18:47:23.681537
| 2020-09-20T14:31:04
| 2020-09-20T14:31:04
| 160,837,582
| 1
| 0
| null | 2022-12-08T06:38:18
| 2018-12-07T14:58:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Time : 2018/12/16 09:55
@File : test2.py
@Author : frank.chang@shoufuyou.com
info = [
{"name": "laoda", "age": 12, "height": "175"},
{"name": "laoda", "age": 22, "height": "180"},
{"name": "laoda", "age": 42, "height": "160"},
]
"""
from operator import itemgetter, attrgetter
class Student:
def __init__(self, name, age, score):
self.name = name
self.age = age
self.score = score
def __str__(self):
return '%s(name:%s,age:%s,score:%s)' % (self.__class__.__name__, self.name, self.age, self.score)
__repr__ = __str__
if __name__ == '__main__':
std1 = Student("A", 11, 23)
std2 = Student("B", 13, 10)
std3 = Student("C", 16, 15)
std4 = Student("D", 34, 4)
students = [std1, std2, std3, std4]
students.sort(key=lambda student: student.score, reverse=True)
# print(students)
for item in students:
print(item)
print(sorted(students, key=attrgetter('age'), reverse=True))
|
[
"frank.chang@shoufuyou.com"
] |
frank.chang@shoufuyou.com
|
28613b7868c467dcf529cc1ac0d62aa02ddc6084
|
8956e74841d05e3eb69018fe212258ff4cd77879
|
/store/views/contact.py
|
97037ad8dadf2c794adccb079e8aba4a83c9f477
|
[] |
no_license
|
Manishkd04/py-estore
|
b66ca2e1343616c1e88c9c8acc74943012d37813
|
aadd736157334a146a8fcca4f79ccb394b5cd6ec
|
refs/heads/master
| 2023-02-02T02:47:05.885373
| 2020-12-19T16:09:45
| 2020-12-19T16:09:45
| 322,549,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 914
|
py
|
from django.shortcuts import render, redirect
from django.contrib.auth.hashers import check_password
from store.models.customer import Customer
from django.views import View
from store.models.product import Product, Contact
from store.models.orders import Order
class Contactview(View):
def get(self, request):
return render(request, 'contact.html')
    def post(self, request):
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
email = request.POST.get('email')
phone = request.POST.get('phone')
desc = request.POST.get('desc')
if request.method=="POST":
contact = Contact(first_name=first_name, last_name=last_name, email=email, phone=phone, desc=desc)
contact.save()
return render(request, 'contact.html')
# return redirect('/contact')
# def post(self, request):
|
[
"nashkumard5@gmail.com"
] |
nashkumard5@gmail.com
|
a9b8cd453824211daa9e5f60e01fec60b5424b1e
|
71a8ad2db4ea0c208bb40ba58777f0467a67b2ba
|
/numpy_bressert/ch02_scipy/solutions_to_funcs.py
|
c170907789bc6fe1db6190bd108026ed19297b08
|
[] |
no_license
|
olegzinkevich/programming_books_notes_and_codes
|
5a1384378f9428a04ea07c8c21efda5c8c2462d4
|
22395f7c83c9b561ec75e7ac8729f92444bd799b
|
refs/heads/main
| 2023-02-26T17:40:36.686058
| 2018-02-07T11:11:06
| 2018-02-07T11:11:06
| 335,343,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,099
|
py
|
# With data modeling and fitting under our belts, we can move on to finding solutions,
# such as “What is the root of a function?” or “Where do two functions intersect?” SciPy
# provides an arsenal of tools to do this in the optimize module.
# Let’s start simply, by solving for the root of an equation (see Figure 3-4). Here we will
# use scipy.optimize.fsolve
from scipy.optimize import fsolve
import numpy as np
# Find the roots of a function.
line = lambda x: x + 3
solution = fsolve(line, -2)
print(solution)
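# Should print approximately [-3.], since x + 3 = 0 at x = -3
# (note added for illustration).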
# Finding the intersection points between two equations is nearly as simple.3
# Defining function to simplify intersection solution
def findIntersection(func1, func2, x0):
return fsolve(lambda x : func1(x) - func2(x), x0)
# Defining functions that will intersect
funky = lambda x : np.cos(x / 5) * np.sin(x / 2)
line = lambda x : 0.01 * x - 0.5
# Defining range and getting solutions on intersection points
x = np.linspace(0,45,10000)
result = findIntersection(funky, line, [15, 20, 30, 35, 40, 45])
# Printing out results for x and y
print(result, line(result))
|
[
"zinkevicholeg@gmail.com"
] |
zinkevicholeg@gmail.com
|
962a7d8d4456f1429dcf5cf7858441c22b27f0df
|
7c33a847d6af570d076ee01edfaee581815fb23f
|
/listings/migrations/0001_initial.py
|
accf0d7af47503650181b62e10901c2ea885f588
|
[] |
no_license
|
ToughTiger/tbre
|
35baadd60050c903092101db77a7f0ad5ed94511
|
3585a3e708d342c34eb6c9de70c638686707997b
|
refs/heads/master
| 2023-04-18T10:51:58.019836
| 2021-05-03T09:51:11
| 2021-05-03T09:51:11
| 359,229,087
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,230
|
py
|
# Generated by Django 3.2 on 2021-04-20 09:30
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('realtors', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Listing',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=120)),
('address', models.CharField(max_length=120)),
('city', models.CharField(max_length=120)),
('state', models.CharField(blank=True, max_length=20)),
('pincode', models.CharField(max_length=20)),
('descripting', models.TextField(max_length=120)),
('price', models.IntegerField(default=0)),
('bedrooms', models.DecimalField(decimal_places=2, max_digits=2)),
('bathrooms', models.IntegerField()),
('parking', models.IntegerField(default=0)),
('builtupArea', models.DecimalField(decimal_places=2, max_digits=2)),
('carpetArea', models.DecimalField(decimal_places=2, max_digits=2)),
('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d/')),
('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_3', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_4', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_5', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_6', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('is_published', models.BooleanField(default=True)),
('list_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
('realtor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='realtors.realtor')),
],
),
]
|
[
"santosh.cerdak@gmail.com"
] |
santosh.cerdak@gmail.com
|
dcd3c1fcde72e282819c6cf23917385107266f14
|
7d25b5877a615cad87ce4b606db1f06dba05afa6
|
/convert_to_snake_case.py
|
6bde2e631f695f88e174a6e78b8cd8732583e520
|
[] |
no_license
|
Thomd209/Convert_to_snake_case
|
c32b06c7edf9f27b6a0240d70e56a55b9a2b12bf
|
e2c6597f882daeede3a3f674cde3e4f109ba9beb
|
refs/heads/main
| 2023-04-04T06:49:16.845828
| 2021-04-14T21:37:53
| 2021-04-14T21:37:53
| 358,048,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
def convert_to_snake_case(s):
snake_case = ''
for i in range(len(s)):
if i == len(s) - 1:
snake_case += s[i]
continue
if s[i + 1].isupper() and s[i + 1].isalpha():
snake_case += s[i] + '_'
else:
snake_case += s[i]
snake_case = snake_case.lower()
return snake_case
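# Illustrative usage (added, not part of the original):
#   convert_to_snake_case('camelCaseString') -> 'camel_case_string'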
camel_case = input()
result = convert_to_snake_case(camel_case)
print(result)
|
[
"thomd209@gmail.com"
] |
thomd209@gmail.com
|
e624584bc90951ce86e2b1625a67dc7648748b9f
|
5c90e8542121f110d595805116afc3ae9863f6bd
|
/Ada_LSN/operations.py
|
2efaca044053bf8901745863f945de769b6df2ee
|
[] |
no_license
|
philaWu/SDL-Skeleton
|
46d710f317a820a3bb193d218f96948534ddfd50
|
554bbfd4775422343ce7d7789ca22720a298d359
|
refs/heads/main
| 2023-06-26T09:01:31.518831
| 2021-07-26T10:48:18
| 2021-07-26T10:48:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,201
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
OPS = {
'skip': lambda C, stride: Identity(),
'conv1': lambda C, stride: Conv(C, C, 1, stride, 0),
'conv3': lambda C, stride: Conv(C, C, 3, stride, 1),
'conv5': lambda C, stride: Conv(C, C, 5, stride, 2),
'dconv3_2': lambda C, stride: DilConv(C, C, 3, stride, 2, 2),
'dconv3_4': lambda C, stride: DilConv(C, C, 3, stride, 4, 4),
'dconv3_8': lambda C, stride: DilConv(C, C, 3, stride, 8, 8),
'dconv5_2': lambda C, stride: DilConv(C, C, 5, stride, 4, 2),
'dconv5_4': lambda C, stride: DilConv(C, C, 5, stride, 8, 4),
'dconv5_8': lambda C, stride: DilConv(C, C, 5, stride, 16, 8),
}
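# Illustrative usage (added, assumed from the lambdas above):
#   op = OPS['dconv3_2'](C=64, stride=1)  # 3x3 conv, dilation 2, 64 channels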
class ReLUConvBN(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, inplace=True, affine=True):
super(ReLUConvBN, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=inplace),
nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding),
# nn.BatchNorm2d(C_out)
)
# for layer in self.op.modules():
# if isinstance(layer, nn.Conv2d):
# nn.init.xavier_normal_(layer.weight)
def forward(self, x):
return self.op(x)
class DilConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, inplace=False, affine=True):
super(DilConv, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=inplace),
nn.Conv2d(C_in, C_out, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation),
# nn.BatchNorm2d(C_out, affine=affine),
)
# for layer in self.op.modules():
# if isinstance(layer, nn.Conv2d):
# nn.init.xavier_normal_(layer.weight)
def forward(self, x):
return self.op(x)
class Conv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, inplace=False, affine=True):
super(Conv, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=inplace),
nn.Conv2d(C_in, C_out, kernel_size=kernel_size, stride=stride, padding=padding),
# nn.BatchNorm2d(C_out), # memory overflow if use this BN
)
# for layer in self.op.modules():
# if isinstance(layer, nn.Conv2d):
# nn.init.xavier_normal_(layer.weight)
def forward(self, x):
return self.op(x)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class FactorizedReduce(nn.Module):
def __init__(self, C_in, C_out, inplace=True, affine=True):
super(FactorizedReduce, self).__init__()
assert C_out % 2 == 0
self.relu = nn.ReLU(inplace=inplace)
self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0)
self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0)
# self.bn = nn.BatchNorm2d(C_out)
def forward(self, x):
x = self.relu(x)
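        # Note (added): conv_2 reads the input shifted by one pixel
        # (x[:, :, 1:, 1:]) so the two stride-2 paths sample complementary
        # spatial positions before being concatenated.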
out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)
# out = self.bn(out)
return out
|
[
"noreply@github.com"
] |
philaWu.noreply@github.com
|
2d80f265944c4dd17736144a3090e076a6c797d3
|
d59cb746bb746c4ed87d5b222c7bde0688d95456
|
/todo/tasks/migrations/0002_task_done.py
|
6de5bb6d3ee400fa11fed9db9dd5bb0b502e3514
|
[] |
no_license
|
quiqueporta/devscola-djangorest
|
0eea64f30b49554c05bd508767be38ea4ab9cc41
|
4121e7a7c82721c730e70f042d95d6f4696da814
|
refs/heads/master
| 2020-03-27T06:19:36.761044
| 2018-08-25T13:27:09
| 2018-08-25T13:34:47
| 146,097,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
# Generated by Django 2.1 on 2018-08-25 12:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tasks', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='task',
name='done',
field=models.BooleanField(blank=True, default=False),
),
]
|
[
"quiqueporta@gmail.com"
] |
quiqueporta@gmail.com
|
23f10a81cd196cdcebaab2c24f29d41451239302
|
ee409492c8e6370ee883b6fa430fde3ec0bd7a73
|
/sys/day8/file.py
|
be6f36086354d38bdacffc5daae7cf8dde891547
|
[] |
no_license
|
n30curry/python
|
0e9aa681e94dafe6b39d703e751a80891ac4e708
|
8ab8e37fec6ae320e5c2f647182e4a24165dcf8d
|
refs/heads/master
| 2020-11-25T21:38:36.949822
| 2016-09-07T02:09:59
| 2016-09-07T02:09:59
| 66,701,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
#!/usr/bin/python
import re
a = open('dict.txt','r')
b = a.readlines()
print(type(b))
|
[
"1070380869@qq.com"
] |
1070380869@qq.com
|
083a6080a763233b225992d29eee723297afd443
|
3a5d2b0b840f138b1813649bf54626ab3bdc66da
|
/admin_login.py
|
86c0bab40c7bf029b3c63bef50ae59340ffded92
|
[] |
no_license
|
deadline-dealt/ip_project
|
37532d67e85f066ee7c43c0c09e62f298a5f4f9d
|
b088b722f57bec8886591699a925a27084bd0d56
|
refs/heads/main
| 2023-03-24T07:49:43.380894
| 2021-03-29T16:18:48
| 2021-03-29T16:18:48
| 352,696,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,825
|
py
|
import tkinter as tk
from tkinter import *
from tkinter import messagebox
from PIL import ImageTk,Image
root=tk.Tk()
root.title("Admin Login")
root.geometry("1000x500+200+100")
#==========background_img==================
photo = ImageTk.PhotoImage(Image.open("/home/varun/varun-files/ip project001/images/loginp.jpg"))
background = Label(image=photo)
background.place(x=0, y=0, relwidth=1, relheight=1)
#===========username===============
username=Label(text="username",font=("times new roman",18,"bold","italic"))
username.place(x=75,y=208)
username_input=Entry(font=("times new roman",18))
username_input.place(x=240,y=208)
#===========Password============
Password=Label(text="Password",font=("times new roman",18,"bold","italic"))
Password.place(x=75,y=260)
passwrd_input=Entry(show="*",font=("times new roman",18))
passwrd_input.place(x=240,y=260)
#==========Login-work==============
def Login():
    if (username_input.get() == "nithish" and passwrd_input.get() == "nithish"):
        messagebox.showinfo("", "successfully logged in")
        messagebox.showinfo("good day", "welcome back Nithish")
        import admin_menu  # importing the module runs admin_menu.py
    elif (username_input.get() == "varun" and passwrd_input.get() == "varun"):
        messagebox.showinfo("", "successfully logged in")
        messagebox.showinfo("good day", "welcome back Varun")
        import admin_menu
    elif (username_input.get() == "harshavardhan" and passwrd_input.get() == "harshavardhan"):
        messagebox.showinfo("", "successfully logged in")
        messagebox.showinfo("good day", "welcome back Harshavardhan")
        import admin_menu
    else:
        messagebox.showinfo("error", "Incorrect username/password")
#========login_button==========0
Login_bt=Button(text="Login",command= Login ,bg="white",fg="red",font=("",13,"bold"))
Login_bt.place(x=260,y=320)
root.mainloop()
|
[
"noreply@github.com"
] |
deadline-dealt.noreply@github.com
|
7b0b098c83e893f44397072c1651e786b859c43e
|
5ddb33c0aed2dbfac990b5cbe35861c91ec25548
|
/dynn/data/mnist.py
|
126220df89f2992934441526595b87c08313e0e0
|
[
"MIT"
] |
permissive
|
pmichel31415/dynn
|
984b0fdfe45bf77f079248d1678d1aaf0dbf3649
|
7e780c4dcf928cdff5f2e52210409d2775ca7796
|
refs/heads/master
| 2022-11-01T08:58:53.432236
| 2018-12-11T18:06:15
| 2018-12-11T18:06:15
| 106,301,897
| 1
| 1
|
MIT
| 2022-10-26T17:18:51
| 2017-10-09T15:32:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,201
|
py
|
#!/usr/bin/env python3
"""
MNIST
^^^^^
Various functions for accessing the
`MNIST <http://yann.lecun.com/exdb/mnist/>`_ dataset.
"""
import os
import struct
import gzip
from io import BytesIO
import array
import numpy as np
from .data_util import download_if_not_there
mnist_url = "http://yann.lecun.com/exdb/mnist/"
mnist_files = {
"train_img": "train-images-idx3-ubyte.gz",
"train_lbl": "train-labels-idx1-ubyte.gz",
"test_img": "t10k-images-idx3-ubyte.gz",
"test_lbl": "t10k-labels-idx1-ubyte.gz",
}
def download_mnist(path=".", force=False):
"""Downloads MNIST from "http://yann.lecun.com/exdb/mnist/"
Args:
path (str, optional): Local folder (defaults to ".")
force (bool, optional): Force the redownload even if the files are
already at ``path``
"""
# Download all files sequentially
for filename in mnist_files.values():
download_if_not_there(filename, mnist_url, path, force=force)
def read_mnist(split, path):
"""Iterates over the MNIST dataset
Example:
.. code-block:: python
for image in read_mnist("train", "/path/to/mnist"):
train(image)
Args:
        split (str): Either ``"train"`` or ``"test"``
path (str): Path to the folder containing the ``*-ubyte`` files
Returns:
tuple: image, label
"""
# Adapted from https://gist.github.com/akesling/5358964
    if split not in ("train", "test"):
raise ValueError("split must be \"train\" or \"test\"")
abs_path = os.path.abspath(path)
fname_img = os.path.join(abs_path, mnist_files[f"{split}_img"])
fname_lbl = os.path.join(abs_path, mnist_files[f"{split}_lbl"])
with open(fname_lbl, "rb") as zflbl:
flbl = BytesIO(gzip.decompress(zflbl.read()))
_, _ = struct.unpack(">II", flbl.read(8))
data = array.array("B", flbl.read())
lbl = np.asarray(data, dtype=np.uint8)
with open(fname_img, "rb") as zfimg:
fimg = BytesIO(gzip.decompress(zfimg.read()))
_, _, rows, cols = struct.unpack(">IIII", fimg.read(16))
data = array.array("B", fimg.read())
img = np.multiply(
np.asarray(data, dtype=np.uint8).reshape(len(lbl), rows, cols, 1),
1.0 / 255.0
)
def get_img(idx): return (img[idx], lbl[idx])
for i in range(len(lbl)):
yield get_img(i)
def load_mnist(path):
"""Loads the MNIST dataset
Returns MNIST as a dictionary.
Example:
.. code-block:: python
mnist = load_mnist(".")
# Train images and labels
train_imgs, train_labels = mnist["train"]
# Test images and labels
test_imgs, test_labels = mnist["test"]
The images are represented as numpy arrays and the labels as
integers.
Args:
path (str): Path to the folder containing the ``*-ubyte.gz`` files
Returns:
dict: MNIST dataset
"""
splits = {}
# Read data
for split in ["train", "test"]:
        data = list(read_mnist(split, path))
images = [img for img, _ in data]
labels = [lbl for _, lbl in data]
splits[split] = (images, labels)
return splits
|
[
"pmichel31415@gmail.com"
] |
pmichel31415@gmail.com
|
e41d656de70cbce44fc05b492b78337f13b6036b
|
4bad9a1d18d0c0071b4a894fa0e181a4a77fece8
|
/app/plans/ramp.py
|
c13924d7e307464d3db41876dbfa6b9f510a5945
|
[] |
no_license
|
bauersmatthew/olf
|
ae2d91514fb31664325cb62a05dea77baed224aa
|
aa472d1d68014976532532460308789aceb94e73
|
refs/heads/main
| 2023-07-22T16:45:25.107692
| 2021-09-10T04:12:43
| 2021-09-10T04:12:43
| 404,792,153
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,875
|
py
|
from errors import *
import random
import copy
def order_from_spec(spec):
if isinstance(spec, str):
return [spec]
elif isinstance(spec, list):
shuffle = False
items = spec
else:
assert isinstance(spec, dict)
shuffle = spec.get('shuffle', False)
items = spec['items']
items = copy.copy(items) # shallow ok
if shuffle:
random.shuffle(items)
out = []
for x in items:
out += order_from_spec(x)
return out
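# Illustrative example (added): order_from_spec(['pfo', {'shuffle': True,
# 'items': ['a', 'b']}]) returns ['pfo'] followed by 'a' and 'b' in a
# random order.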
def default_order_spec(names):
pfo = None
if 'pfo' in names: pfo = 'pfo'
elif 'paraffin' in names: pfo = 'paraffin'
if pfo is None: return [names]
return [pfo, {'shuffle': True, 'items': [x for x in names if x != pfo]}]
def try_typecasting(cfg, *args):
a = []
for k, ty in args:
try:
a.append(ty(cfg[k]))
except:
raise ParseError(f'Failed to typecast param: {k}')
return tuple(a)
def deep_update(a, b):
for k, v in b.items():
if k not in a:
a[k] = v
else:
va = a[k]
if isinstance(va, dict):
assert isinstance(v, dict)
deep_update(va, v)
else:
a[k] = v
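# Illustrative example (added): deep_update({'timing': {'stim': 2.0}},
# {'timing': {'rest': 60.0}}) leaves the first dict as
# {'timing': {'stim': 2.0, 'rest': 60.0}}.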
class VialOdor:
def __init__(self, odors, carrier_rate, odor_rate):
self.order = sorted(odors, key=lambda o: o.conc_num())
self.carrier_rate = carrier_rate
self.odor_rate = odor_rate
def insert_steps(self, plan, prestimt, poststimt, stimt):
for o in self.order:
plan.push(f'Odor: {o}')
plan += [
AdjustOdorFlowStep(o, self.carrier_rate, self.odor_rate),
WaitStep(prestimt),
OdorStep(o, stimt),
WaitStep(poststimt)
]
plan.pop()
def parse_float_or_percent(x, of_what):
if isinstance(x, str):
assert x[-1] == '%'
prop = float(x[:-1])/100
return prop*of_what
else:
return float(x)
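# Illustrative example (added): parse_float_or_percent('10%', 2000.0)
# returns 200.0, while parse_float_or_percent(150, 2000.0) returns 150.0.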
class GasOdor:
def __init__(self, odor, cfg):
self.odor = odor
tot = float(cfg['total-flow'])
oflows = []
for x in cfg['odor-flows']:
oflows.append(parse_float_or_percent(x, tot))
cflows = [tot-x for x in oflows]
self.rates = sorted(
list(zip(cflows, oflows)),
key = lambda x: x[1]
)
def insert_steps(self, plan, prestimt, poststimt, stimt):
for carrier_rate, odor_rate in self.rates:
pc = round(100*odor_rate/(odor_rate+carrier_rate))
plan.push(f'Odor: {self.odor} at {pc}%')
plan += [
AdjustOdorFlowStep(self.odor, carrier_rate, odor_rate),
WaitStep(prestimt),
OdorStep(self.odor, stimt),
WaitStep(poststimt)
]
@plan('Concentration Ramp', 'ramp')
def concentration_ramp_plan(odors, cfg_):
names = list(set([o.name for o in odors]))
cfg = {
'order': default_order_spec(names),
'gasses': {},
'nblocks': 3,
'timing': {
'stim': 2.0,
'begin-movie': 30.0,
'pre-stim': 30.0,
'post-stim': 30.0,
'rest': 120.0
},
'default-flow': {
'total-flow': 2000.0,
'odor-flow': '10%'
}
}
deep_update(cfg, cfg_)
nb, timing = try_typecasting(cfg,
('nblocks', int), ('timing', dict)
)
stimt, begint, prestimt, poststimt, restt = try_typecasting(timing,
('stim', float), ('begin-movie', float),
('pre-stim', float), ('post-stim', float),
('rest', float)
)
default_flow_cfg = dict(cfg['default-flow'])
vial_total_rate, = try_typecasting(default_flow_cfg, ('total-flow', float))
vial_odor_rate = parse_float_or_percent(default_flow_cfg['odor-flow'], vial_total_rate)
vial_carrier_rate = vial_total_rate - vial_odor_rate
order_spec = order_from_spec(cfg['order'])
order = []
for x in order_spec:
assert x in names
relevant = [o for o in odors if o.name == x]
if x in cfg['gasses']:
assert len(relevant) == 1
order.append(GasOdor(relevant[0], cfg['gasses'][x]))
else:
assert len(relevant) > 0
order.append(VialOdor(relevant, vial_carrier_rate, vial_odor_rate))
sl = StepList()
for i in range(nb):
sl.push(f'Block {i+1}')
sl += [
MirrorDownStep(),
ScopeOnStep()
]
if i == 0:
sl.append(WaitStep(begint))
for o in order:
o.insert_steps(sl, prestimt, poststimt, stimt)
sl += [
ScopeOffStep(),
MirrorUpStep()
]
if i+1 < nb:
sl.append(WaitStep(restt))
sl.pop()
return sl
|
[
"bauer.s.matthew@gmail.com"
] |
bauer.s.matthew@gmail.com
|
43d980dd88b22f0291fd9f0ef7279c68ae1ca292
|
3417f1ad3580519829172af19d7ce01e7972b4b7
|
/manage.py
|
1dd101fa79b5ad02c51c906177c5f98e305ff091
|
[] |
no_license
|
min-afk/noob
|
b54eec904da9e5beb8459a74d1201c299314d673
|
0af044509878dfd9c04b179957daf67e33750f81
|
refs/heads/master
| 2021-03-29T11:31:09.623974
| 2020-03-17T12:04:00
| 2020-03-17T12:04:00
| 247,950,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'noob.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"minotodor@gmail.com"
] |
minotodor@gmail.com
|
634a8fda1b878503bf27d2455a97bc7b1ebf595a
|
e5450be6c3a071b81b57ac640b756f707b44cc92
|
/server/resources/portfolio/education.py
|
6218f0af78db05b0f31680a66a1010f2043ba15a
|
[] |
no_license
|
karmadilos/portfolio-market
|
9fec4c5b0de6c8d9d894601076f3de7de569d64b
|
2e82c5aff59a607843e78e60b361fec97fb05d8a
|
refs/heads/master
| 2023-04-09T16:19:50.945833
| 2021-03-08T12:44:32
| 2021-03-08T12:44:40
| 362,552,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,536
|
py
|
from flask import jsonify, request
from flask_restful import reqparse, abort, Api, Resource
from flask_jwt_extended import jwt_required
from flask_jwt_extended import get_jwt_identity
from database.models.education import Education
from database.db import db
from flask_jwt_extended import JWTManager
import datetime
import maya
jwt = JWTManager()
keys = [
"id",
"user_id",
"school_name",
"major",
"status",
"create_date",
"updated_date",
]
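# Note (added): the attribute names above are used below to serialize
# Education rows into the JSON responses.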
# Once token-based auth is in place, post/put/delete operations
# should only be reachable after token verification.
class EducationApi(Resource):
def get(self, user_id, id=None):
        # If an id is given, return only that single record, 200
if not id:
educations = (
db.session.query(Education)
.filter_by(user_id=user_id)
.order_by(Education.create_date)
.all()
)
else:
educations = db.session.query(Education).filter_by(user_id=user_id, id=id)
        # If there is no data yet, return just an empty array, 200
        if not educations:
            return jsonify(status="success", data=[])
result = [{key: getattr(v, key) for key in keys} for v in educations]
return jsonify(
status="success",
educations=result,
)
@jwt_required()
def post(self, user_id):
if get_jwt_identity() != int(user_id):
            abort(401, status="fail", message="Access denied.")
# school_name, major, status = dict(request.get_json(force=True)).values()
# print(request.header.get("csrf-access-token"))
print(request.cookies, request.headers)
education = Education(user_id)
db.session.add(education)
db.session.commit()
return jsonify(
status="success",
result={key: getattr(education, key) for key in keys},
)
@jwt_required()
def put(self, user_id, id=None):
if get_jwt_identity() != int(user_id):
            abort(401, status="fail", message="Access denied.")
        # Update several records at once (the request body is an array of edits)
data = request.get_json(force=True)
print(data)
for v in data:
v["updated_date"] = datetime.datetime.utcnow()
v["create_date"] = maya.parse(v["create_date"]).datetime()
db.session.query(Education).filter_by(id=v["id"]).update(v)
db.session.commit()
return jsonify(
status="success",
result={"id": list(map(lambda x: x["id"], data))},
)
@jwt_required()
def delete(self, user_id, id):
if get_jwt_identity() != int(user_id):
            abort(401, status="fail", message="Access denied.")
        if not id:
            abort(400, status="fail", message="There is no data to delete.")
education = Education.query.filter_by(user_id=user_id, id=id).first()
        # If the education record does not exist, respond 400
if not education:
            abort(400, status="fail", message="Invalid ID.")
        # If the record exists, delete it, then respond 200
db.session.delete(education)
db.session.commit()
return jsonify(
status="success",
result={"id": id, "user_id": user_id},
)
|
[
"swj960515@gmail.com"
] |
swj960515@gmail.com
|
b62c10fe3f323f7df0fb2e4d3ffbe716839c228a
|
d1c0c6f9f2a9297639825138f8a62aa7a6e71a03
|
/PythonMoora/management/hasiltes/views.py
|
c6ffdf481e9ca2fd892353945ad9fb7e86a81cb7
|
[] |
no_license
|
yunuslon/SPKDataSiswa
|
bd147280c4f141e193554da499aa45ccf9219a4c
|
e2af29d7d6777460d285207f11ebb6c7d2b926c1
|
refs/heads/master
| 2020-04-08T02:01:47.689577
| 2018-12-19T07:55:32
| 2018-12-19T07:55:32
| 158,919,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,919
|
py
|
from django.shortcuts import render, redirect, get_list_or_404
from django.views.generic import View
from django.http import HttpResponse
from django.contrib import messages
from orm.models import HasilTes,Siswa
from .forms import HasilTesForm
from library.view import ManagementAccessView
# Create your views here.
class ListHasilTesView(ManagementAccessView):
def get(self, request):
template = 'hasiltes/index.html'
form = HasilTesForm(request.POST or None)
hasiltes = HasilTes.objects.all()
data = {
'form_mode' : 'add',
'form' : form,
'siswa' : Siswa.objects.all(),
'hasiltes' : hasiltes,
}
return render(request, template, data)
class SaveHasilTesView(ManagementAccessView):
def post(self, request):
template = 'hasiltes/index.html'
form = HasilTesForm(request.POST or None)
if form.is_valid():
            # everything from reading the form data
            # through saving the record
hasiltes = HasilTes()
hasiltes.siswa = form.cleaned_data['siswa']
hasiltes.mata_pelajaran = form.cleaned_data['mata_pelajaran']
hasiltes.nilai = form.cleaned_data['nilai']
            messages.add_message(request, messages.INFO, 'Data saved successfully')
hasiltes.save()
return redirect('hasiltes:view')
else:
hasiltes = HasilTes.objects.all()
data = {
'form': form,
'hasiltes': hasiltes,
}
            messages.add_message(request, messages.INFO, 'Failed to save data!')
return render(request, template, data)
class EditHasilTesView(ManagementAccessView):
template = 'hasiltes/edit.html'
def get(self, request, id):
hasiltes = HasilTes.objects.filter(id=id)
if not hasiltes.exists():
return redirect('hasiltes:view')
hasiltes = hasiltes.first()
initial = {
'id': hasiltes.id,
'mata_pelajaran': hasiltes.mata_pelajaran,
'nilai': hasiltes.nilai,
'siswa': hasiltes.siswa,
}
form = HasilTesForm(initial=initial)
hasiltes = HasilTes.objects.all()
data = {
'id':id,
'form': form,
'form_mode' : 'edit',
'hasiltes' : hasiltes,
}
return render(request, self.template, data)
class UpdateHasilTesView(ManagementAccessView):
def post(self, request):
template = "hasiltes/index.html"
form = HasilTesForm(request.POST or None)
if form.is_valid():
id = form.cleaned_data['id']
hasiltes = HasilTes.objects.get(pk=id)
hasiltes.mata_pelajaran = form.cleaned_data['mata_pelajaran']
hasiltes.nilai = form.cleaned_data['nilai']
hasiltes.siswa = form.cleaned_data['siswa']
            messages.add_message(request, messages.INFO, 'Data updated successfully')
hasiltes.save(force_update=True)
return redirect('hasiltes:view')
else:
hasiltes = HasilTes.objects.all()
data = {
'form_mode':'edit',
'form': form,
'hasiltes': hasiltes,
}
            messages.add_message(request, messages.INFO, 'Failed to update data!')
# return render(request, template, data)
return HttpResponse(form.errors)
class HapusHasilTesView(ManagementAccessView):
def get(self, request, id):
hasiltes = HasilTes.objects.filter(id=id)
if hasiltes.exists():
hasiltes.first().delete()
            messages.add_message(request, messages.INFO, 'Data deleted successfully')
return redirect('hasiltes:view')
else:
            messages.add_message(request, messages.INFO, 'Failed to delete data!')
|
[
"yunuslon97@gmail.com"
] |
yunuslon97@gmail.com
|
a32cbb4f180530ca941c558c1b528e2e3c1b2831
|
3f992958e8897a957e3b99c1391f48821fdcfb68
|
/Tests/Test_InputUserData.py
|
1ac375b7603157d74f5a1d3a01593690872e242d
|
[] |
no_license
|
GlubokinV/ProgramSort
|
d36143898b8f88591906944ea2698a7685009998
|
49d7d5d44c383dbc22f1367e1c8e06f5d78537b8
|
refs/heads/main
| 2023-04-14T01:07:18.039866
| 2021-04-15T18:29:54
| 2021-04-15T18:29:54
| 357,791,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,641
|
py
|
import unittest
from Program.InputUserData import DataInput
class TestExample(unittest.TestCase):
"""Тестирование функции подготовки вводимых данных для обработки"""
def test_example_only_KZS(self):
"""Разбивает строку из КЗС на отдельные символы?"""
x = DataInput()
res = x.example('ЗКЗС')
self.assertEqual(res, ['З', 'К', 'З', 'С'])
def test_example_spec_char(self):
"""Разбивает строку на отдельные символы, если есть спец. символы?"""
x = DataInput()
res = x.example('З!@#')
self.assertEqual(res, ['З', '!', '@', '#'])
def test_example_exceptions_not_KZS(self):
"""Появляется исключение, при вводе строки без КЗС?"""
x = DataInput()
self.assertRaises(ValueError, x.example, '!@#')
def test_example_exceptions_empty_line(self):
"""Появляется исключение, при вводе пустой строки?"""
x = DataInput()
self.assertRaises(ValueError, x.example, [])
def test_example_long_line(self):
"""Разбивает длинную строку на отдельные символы?"""
x = DataInput()
res = x.example('З!@sd;о;;;;qqqqqqqq')
self.assertEqual(res, ['З', '!', '@', 's', 'd', ';', 'о', ';', ';', ';', ';', 'q', 'q',
'q', 'q', 'q', 'q', 'q', 'q'])
if __name__ == '__main__':
unittest.main()
|
[
"glubokin_vyacheslav@mail.ru"
] |
glubokin_vyacheslav@mail.ru
|
ee1e1f57e91c9660d5c3c00a1b93a25981c9b7af
|
82fce9aae9e855a73f4e92d750e6a8df2ef877a5
|
/Lab/venv/lib/python3.8/site-packages/OpenGL/raw/GLES1/OES/stencil8.py
|
412215b93288aa6a7ffd69e2b7144d4f8c58a803
|
[] |
no_license
|
BartoszRudnik/GK
|
1294f7708902e867dacd7da591b9f2e741bfe9e5
|
6dc09184a3af07143b9729e42a6f62f13da50128
|
refs/heads/main
| 2023-02-20T19:02:12.408974
| 2021-01-22T10:51:14
| 2021-01-22T10:51:14
| 307,847,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.constant import Constant as _C
# Code generation uses this
# End users want this...
from OpenGL.raw.GLES1 import _errors
_EXTENSION_NAME = 'GLES1_OES_stencil8'
def _f(function):
return _p.createFunction(function, _p.PLATFORM.GLES1, 'GLES1_OES_stencil8', error_checker=_errors._error_checker)
GL_STENCIL_INDEX8_OES = _C('GL_STENCIL_INDEX8_OES', 0x8D48)
|
[
"rudnik49@gmail.com"
] |
rudnik49@gmail.com
|
3ecc132007c95e7d84efb6b8054cfa19c6580661
|
2f18521276c42457d09ed2470d5c663e28f47504
|
/triangle.py
|
d28eaf844529e5dc99b8991c930a7c56204d79f5
|
[] |
no_license
|
benpulido21/python_prueba_isep
|
6240734de9aac0e9cbe9637e01712e10578f5f10
|
575340fa8b20bd0d68494509e96a38d30897606e
|
refs/heads/main
| 2023-01-06T18:10:46.727378
| 2020-11-10T02:16:05
| 2020-11-10T02:16:05
| 311,506,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
# Declare the Triangulo class with its constructor and private attributes
class Triangulo(object):
def __init__(self):
self.__a = 6
self.__b = 10
self.__c = 6
    # Method to compute the triangle's height and area and print it
def CalcularArea(self):
a = self.__a
b = self.__b
c = self.__c
h = a*b/c
area = b*h/2
print("El area del triangulo es: ",area)
#instanciamos el clase y objeto y llamamos a la funcion calcular area
calculo = Triangulo()
calculo.CalcularArea()
|
[
"noreply@github.com"
] |
benpulido21.noreply@github.com
|
0fd7e4600c1f60ad50ddd026cfbcf63facd62174
|
28a462a28f443c285ca5efec181ebe36b147c167
|
/tests/compile/basic/es2019/IdentifierReference[0,0].EarlyErrors.spec
|
83d1d21d575792c488f8b800f94658ad98179c23
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
kaist-plrg/jstar
|
63e71f9156860dc21cccc33a9f6c638dfee448ea
|
1282919127ea18a7e40c7a55e63a1ddaaf7d9db4
|
refs/heads/main
| 2022-07-22T08:12:34.947712
| 2022-02-27T04:19:33
| 2022-02-27T11:06:14
| 384,045,526
| 6
| 4
|
NOASSERTION
| 2022-02-27T11:05:26
| 2021-07-08T07:53:21
|
Python
|
UTF-8
|
Python
| false
| false
| 316
|
spec
|
<li>
It is a Syntax Error if this production has a <sub>[Yield]</sub> parameter and StringValue of |Identifier| is `"yield"`.
</li>
<li>
It is a Syntax Error if this production has an <sub>[Await]</sub> parameter and StringValue of |Identifier| is `"await"`.
</li>
|
[
"h2oche22@gmail.com"
] |
h2oche22@gmail.com
|
cd3fe391a3c4e68535080180aaa0f1eeff2ee745
|
4a4a4653bc70bb25241c532307e8fbb2142fee92
|
/helloworld.py
|
bac76f0f7208476e12978f924dd2f1c2c564d4b7
|
[] |
no_license
|
v202009/PROG1700-basics-demo-da
|
4830daa5792fe3eb0e43fa7f6e29d9d91d3f510a
|
7a7e2e1863843bde4482e642de9f245e8eee79d8
|
refs/heads/master
| 2022-12-17T14:48:12.920194
| 2020-09-22T14:54:49
| 2020-09-22T14:54:49
| 297,675,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 65
|
py
|
print("Hello World!")
print("Hello, again!")
print("Goodbye...")
|
[
"w0432542@nscc.ca"
] |
w0432542@nscc.ca
|
1b3b2b81385f029cf071465b5a67afbfcc29d78e
|
5de16bedbdabbd552dcebdeec3d2be30b59c80e3
|
/Array Partition I/solution.py
|
7d7a8e6875fca546f93cf66fffa754e5d02d2abf
|
[] |
no_license
|
kimjaspermui/LeetCode
|
39f6aa61a27e7bf2aac7de940941ec376f3640e8
|
c01002206fcc1b3ed35d1ba1e83dffdff5fc16a5
|
refs/heads/master
| 2020-12-02T07:51:49.508145
| 2018-07-15T04:21:48
| 2018-07-15T04:21:48
| 96,737,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
class Solution:
def arrayPairSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# sort the list first
nums = sorted(nums)
# get the sum by pairing them from the left
tempSum = 0
for i in range(0, len(nums), 2):
tempSum += min(nums[i], nums[i+1])
return tempSum
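# Worked example (added for illustration): nums = [1, 4, 3, 2]
# -> sorted [1, 2, 3, 4] -> min(1, 2) + min(3, 4) = 1 + 3 = 4.
# After sorting, nums[i] <= nums[i+1], so min(nums[i], nums[i+1]) is
# always nums[i]; pairing adjacent sorted elements maximizes the sum.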
|
[
"kmui@ucsd.edu"
] |
kmui@ucsd.edu
|
40468665411f90fee120f8434b3ccd804495257c
|
66d25596e2165da03a40e6f31c59789d27d71572
|
/ban/commands/auth.py
|
e89df21f79ef2f27edbe8cdc087963ab72e72c8b
|
[] |
no_license
|
christopheprudent/ban
|
72336bfaf1e5bc24477f00ada2aa1bd37c12d614
|
187a2e7719cb2d7ef64a35c22975cea3686097bb
|
refs/heads/master
| 2021-01-18T11:23:38.890962
| 2016-05-11T16:39:42
| 2016-05-11T16:39:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
from ban.auth.models import Token, User
from ban.commands import command, reporter
from ban.core import context
from . import helpers
@command
@helpers.session
def dummytoken(**kwargs):
"""Create a dummy token for dev."""
session = context.get('session')
Token.delete().where(Token.access_token == 'token').execute()
Token.create(session=session.pk, access_token="token", expires_in=3600*24)
reporter.notice('Created token', 'token')
@command
def createuser(username=None, email=None, is_staff=False, **kwargs):
"""Create a user.
is_staff set user staff
"""
if not username:
username = helpers.prompt('Username')
if not email:
email = helpers.prompt('Email')
password = helpers.prompt('Password', confirmation=True, hidden=True)
validator = User.validator(username=username, email=email)
if not validator.errors:
user = validator.save()
user.set_password(password)
if is_staff:
user.is_staff = True
user.save()
reporter.notice('Created', user)
else:
reporter.error('Errored', validator.errors)
|
[
"yb@enix.org"
] |
yb@enix.org
|
8a57fcfb46d097525f0644f0772dd6101d30b6e8
|
491727fd417d9012055601b322a2a85ae0f09fa7
|
/quoteShare/main/urls.py
|
6e55634ce2ed20f5264a3adb439c327eba0655d3
|
[] |
no_license
|
neumeye9/quoteShare
|
77f49e393a4744a7b2314dec6664bf51be32155b
|
796e9c8d1806f2cfa08e72a9b4e4417ac001880d
|
refs/heads/master
| 2021-06-28T04:14:02.274981
| 2017-09-18T16:49:42
| 2017-09-18T16:49:42
| 103,965,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
"""main URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^', include('apps.belt_app.urls')),
]
|
[
"noreply@github.com"
] |
neumeye9.noreply@github.com
|
2429dd6e0f7ede974b1e9f1a3904282d0dd48edf
|
5c89de8b2b6ceaabd7c094514fd96440577a4347
|
/profiles/phylosift_gene_filter.py
|
4c2f80e0a7d27a301d9e03467a6b8269375a6010
|
[] |
no_license
|
Ecogenomics/PhylogeneticM
|
0d959d1bfc6b5abf8a112efd2a45114d22a6ee38
|
463243f5e02a32d0427dca784c9c20bb264a07f6
|
refs/heads/master
| 2020-06-03T13:59:50.113576
| 2013-03-11T06:06:30
| 2013-03-11T06:06:30
| 5,761,180
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,272
|
py
|
import os
import sys
import psycopg2 as pg
import xml.etree.ElementTree as ET
import common
valid_configs = [('individual', type(None), "Create individual FASTA files for each marker instead of a concatenated alignment."),
('taxonomy', type(''), "Filter for organisms with this taxonomy (internal genome tree taxonomy)"),
('reject_missing_taxonomy', type(None), "Reject any genomes that have not got an assigned taxonomy."),
('gene_count_threshold', type(0), "Reject any genomes with a phylosift gene count below this threshold.")]
def GetValidConfigOptions():
return valid_configs
def MakeTreeData(GenomeDatabase, marker_set_id, list_of_genome_ids, directory, prefix=None, config_dict=None):
"""
TODO - This function is less ugly than before, but it needs to be cleaned up.
"""
if not os.path.isdir(directory):
GenomeDatabase.ReportError("Directory doesn't exist: " + directory)
return None
if not common.CheckPassedConfigsAgainstKnownConfigs(config_dict, GetValidConfigOptions()):
return None
cur = GenomeDatabase.conn.cursor()
# Get the total number of markers in this marker set
total_marker_count = len(GenomeDatabase.GetMarkerIdListFromMarkerSetId(marker_set_id))
# For each genome, get the number of markers in this marker set that genome contains
genome_gene_counts = GenomeDatabase.GetAlignedMarkersCountForGenomeFromMarkerSetId(marker_set_id)
# This filter needs to know about phylosift markers, calculate them if they haven't been calculated
for genome_id in list_of_genome_ids:
uncalculated_markers = GenomeDatabase.FindUncalculatedMarkersForGenomeId(genome_id,
GenomeDatabase.GetMarkerIdListFromMarkerSetId(1))
if len(uncalculated_markers) > 0:
if GenomeDatabase.debugMode:
print "Calculating markers for ", genome_id
print uncalculated_markers
GenomeDatabase.RecalculateMarkersForGenome(genome_id, uncalculated_markers)
# For each genome, get the number of phylosift markers it contains
phylosift_gene_counts = GenomeDatabase.GetAlignedMarkersCountForGenomeFromMarkerSetId(1)
# For all of the markers, get the expected marker size.
cur.execute("SELECT markers.id, markers.database_specific_id, size " +
"FROM markers, marker_set_contents " +
"WHERE set_id = %s " +
"AND marker_id = markers.id "
"ORDER by markers.id", (marker_set_id,))
chosen_markers = dict()
for marker_id, database_specific_id, size in cur:
chosen_markers[marker_id] = {'database_specific_id': database_specific_id, 'size': size}
individual_marker_fasta = dict()
genome_info = dict()
if prefix is None:
prefix = "genome_tree_data"
gg_fh = open(os.path.join(directory, prefix + "_concatenated.greengenes"), 'wb')
fasta_concat_fh = open(os.path.join(directory, prefix + "_concatenated.faa"), 'wb')
# Run through each of the genomes and make the magic happen.
for genome_id in list_of_genome_ids:
cur.execute("SELECT tree_id, name, XMLSERIALIZE(document metadata as text), username "+
"FROM genomes, users "+
"WHERE users.id = owner_id "+
"AND genomes.id = %s", (genome_id,))
result = cur.fetchone()
if not result:
continue
(tree_id, name, xmlstr, owner) = result
# Check if complete.
if 'gene_count_threshold' in config_dict and phylosift_gene_counts[genome_id] < int(config_dict['gene_count_threshold']):
sys.stderr.write("WARNING: Genome %s has < %i markers (%i) in the database and will be missing from the output files.\n" %
(tree_id,
int(config_dict['gene_count_threshold']),
phylosift_gene_counts[genome_id]))
continue
# Populate genome info
genome_info['markers'] = dict()
genome_info['name'] = name
genome_info['tree_id'] = tree_id
genome_info['xmlstr'] = xmlstr
genome_info['owner'] = owner
# update XML metadata
genome_info.update(common.GetInternalMetadataDictFromXMLString(xmlstr))
# Test taxonomy
taxonomy_success = True
if not genome_info['internal_tax']:
if 'reject_missing_taxonomy' in config_dict:
taxonomy_success = False
else:
if ('taxonomy' in config_dict and config_dict['taxonomy']):
if (genome_info['internal_tax'].find(config_dict['taxonomy']) < 0):
taxonomy_success = False
if not taxonomy_success:
if GenomeDatabase.debugMode:
sys.stderr.write("%s (%s) not included. Filtered at taxonomy stage (taxonomy: %s)\n" % (genome_info['tree_id'],
genome_info['name'],
genome_info['internal_tax']))
sys.stderr.flush()
continue
cur.execute("SELECT aligned_markers.marker_id, sequence " +
"FROM aligned_markers, marker_set_contents "+
"WHERE marker_set_contents.marker_id = aligned_markers.marker_id " +
"AND genome_id = %s " +
"AND sequence is NOT NULL "+
"AND set_id = %s ", (genome_id, marker_set_id))
if (cur.rowcount == 0):
sys.stderr.write("WARNING: Genome %s has no markers for this marker set in the database and will be missing from the output files.\n" % tree_id)
sys.stderr.flush()
continue
for marker_id, sequence in cur:
genome_info['markers'][marker_id] = sequence
aligned_seq = '';
for marker_id in chosen_markers.keys():
if marker_id in genome_info['markers']:
sequence = genome_info['markers'][marker_id]
fasta_outstr = ">%s\n%s\n" % (genome_info['tree_id'],
sequence)
try:
individual_marker_fasta[marker_id].append(fasta_outstr)
except KeyError:
individual_marker_fasta[marker_id] = [fasta_outstr]
else:
sequence = chosen_markers[marker_id]['size'] * '-'
aligned_seq += sequence
fasta_outstr = ">%s\n%s\n" % (genome_info['tree_id'],
aligned_seq)
gg_list = ["BEGIN",
"db_name=%s" % genome_info['tree_id'],
"organism=%s" % genome_info['name'],
"prokMSA_id=%s" % genome_info['tree_id'],
"owner=%s" % genome_info['owner'],
"genome_tree_tax_string=%s" % genome_info['internal_tax'],
"greengenes_tax_string=%s" % genome_info['gg_tax'],
"core_list_status=%s" % genome_info['core_list_status'],
"remark=%iof%i" % (genome_gene_counts[genome_id], total_marker_count),
"warning=",
"aligned_seq=%s" % (aligned_seq),
"END"]
gg_outstr = "\n".join(gg_list) + "\n\n";
fasta_concat_fh.write(fasta_outstr)
gg_fh.write(gg_outstr)
gg_fh.close()
fasta_concat_fh.close()
if "individual" in config_dict:
for marker_id in chosen_markers.keys():
fasta_individual_fh = open(os.path.join(directory, prefix + "_" + str(marker_id) + "_" +
chosen_markers[marker_id]['database_specific_id'] + ".faa"),
'wb')
fasta_individual_fh.write(''.join(individual_marker_fasta[marker_id]))
fasta_individual_fh.close()
return True
|
[
"a.skarshewski@uq.edu.au"
] |
a.skarshewski@uq.edu.au
|
0262980918b061d33ebd285f6a64c74430421acf
|
d08f165c3dc46a8736588306e9e7c8964e22c7f8
|
/Inheritance_practice.py
|
155a3ae4a096145cf5aafda3763306bafce8a401
|
[] |
no_license
|
vermadev54/Python-oops
|
fa2b12389612cd222852041e8d76792d57ebee68
|
d1a1d44117c34efa950b2397718d10d5ef268e86
|
refs/heads/master
| 2021-07-12T05:23:31.785691
| 2020-08-16T06:37:15
| 2020-08-16T06:37:15
| 192,698,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
class student(object):
__city="patna"
def __init__(self,name,email,roll,father_name):
self.name=name
self._roll=roll
self.email=email
self.__father_name=father_name
    def __str__(self):
        return "{},{},{}".format(self.name, self._roll, self.email)
    def __get_father_name(self):
        return self.__father_name
    def get_fname(self):
        return self.__get_father_name(), self.__city
class school_employee(student):
def __init__(self,name,email,emp_id):
super().__init__(name, email,roll=None,father_name=None)
self.emp_id=emp_id
def __str__(self):
return "name: {},email: {},emp_id: {}".format(self.name, self.email,self.emp_id)
stu1=student("jainendra","jain@gmail.com","17","sid")
print(stu1.__dict__)
print(stu1.get_fname())
stu1.get_fname()
emp1=school_employee("bhanu","bhanu@gmail.com","2020123")
print (emp1.__dict__)
print(emp1._roll)
|
[
"kumar"
] |
kumar
|