| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–288 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |

Each record below lists these fields in order on a single pipe-delimited line (blob_id through extension), followed by the multi-line `content` block, followed by the `authors` and `author_id` fields.
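To make the schema concrete, here is a minimal sketch of streaming and filtering rows of this shape with the Hugging Face `datasets` library. The dataset path is a hypothetical placeholder (this dump does not name the dataset); the column names are the ones documented above.

```python
# Minimal sketch, assuming a dataset with the schema above is hosted on the
# Hugging Face Hub. "user/python-code-dump" is a hypothetical placeholder name.
from datasets import load_dataset

ds = load_dataset("user/python-code-dump", split="train", streaming=True)

# Keep small, permissively licensed, hand-written Python files.
for row in ds:
    if (
        row["license_type"] == "permissive"
        and not row["is_vendor"]
        and not row["is_generated"]
        and row["length_bytes"] < 4096
    ):
        print(row["repo_name"], row["path"], row["star_events_count"])
```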
| e7607e9704c88c53d9835277d1ea1ef9a9502af4 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/pyinstaller/build/lib/PyInstaller/hooks/hook-reportlab.pdfbase._fontdata.py | 4765de6acb08fe810b8f6653f67d9aa9e9d91ae1 | ["LicenseRef-scancode-other-permissive"] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py |
version https://git-lfs.github.com/spec/v1
oid sha256:70c3529a579452cddbe6fe3200d83c2064c7fa3a858851cb516a77177a79c258
size 538
| ["nateweiler84@gmail.com"] | nateweiler84@gmail.com |
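Note that the `content` field of this first row is a Git LFS pointer stub rather than the actual file body: the 128 bytes recorded in `length_bytes` are the pointer itself, while the pointed-to file is 538 bytes. As a small illustration, this sketch unpacks such a pointer into a dict; the `parse_lfs_pointer` helper is mine, not part of the dataset.

```python
# Sketch: unpack a Git LFS pointer stub like the `content` value above.
# Assumes only the "key value" line format visible in the row.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value  # e.g. "oid" -> "sha256:70c3..."
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:70c3529a579452cddbe6fe3200d83c2064c7fa3a858851cb516a77177a79c258\n"
    "size 538"
)
print(parse_lfs_pointer(pointer)["size"])  # -> "538"
```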
| d257dfaf805251a449fcec03c06aa55d97d4de0a | 107c161846246ead49747b8257f0bffe57ff3866 | /megabeast/old/test_models.py | 2734639aa5e69b50f99f8ba217e27761079d30f9 | ["BSD-3-Clause"] | permissive | BEAST-Fitting/megabeast | e1334786d23d33795bb5f613586fe1615406e102 | b6b9efbe197bde0a5372bc9f09699ad9d3d99886 | refs/heads/master | 2023-06-07T18:14:52.276860 | 2022-02-17T21:25:07 | 2022-02-17T21:25:07 | 111,114,312 | 2 | 11 | null | 2023-05-27T08:09:38 | 2017-11-17T14:44:50 | Python | UTF-8 | Python | false | false | 988 | py |
import pytest
from megabeast.mbsettings import mbsettings
from megabeast.singlepop_dust_model import MB_Model
fd_model = {
"Av": {
"name": "gaussian",
"varnames": ["mean", "sigma"],
"varinit": [1.0, 0.25],
"prior": {
"name": "flat",
"var_minmax": [[0.005, 5.0], [0.05, 1.0]],
},
},
"Rv": {
"name": "gaussian",
"varnames": ["mean", "sigma"],
"varinit": [3.1, 0.25],
"prior": {
"name": "flat",
"var_minmax": [[2.0, 6.0], [0.05, 1.0]],
},
}
}
models = [fd_model, fd_model]
@pytest.mark.parametrize("model", models)
def test_lnprior(model):
"""
Test that the lnprior handles the defined prior types
"""
priortypes = ["fixed", "flat"]
# setup params
params = mbsettings()
params.fd_model = model
mod = MB_Model(params)
for cprior in priortypes:
assert mod.lnprior(mod.start_params()) == 0.0, "test"
| ["kgordon@stsci.edu"] | kgordon@stsci.edu |

| a25f71c986a1212e69a747de32c5133c5b78a446 | 733496067584ee32eccc333056c82d60f673f211 | /idfy_rest_client/models/person_navn_adresse.py | 5f7e495f992d7a22d700bd3462ef1b39195572d8 | ["MIT"] | permissive | dealflowteam/Idfy | 90ee5fefaa5283ce7dd3bcee72ace4615ffd15d2 | fa3918a6c54ea0eedb9146578645b7eb1755b642 | refs/heads/master | 2020-03-07T09:11:15.410502 | 2018-03-30T08:12:40 | 2018-03-30T08:12:40 | 127,400,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,252 | py |
# -*- coding: utf-8 -*-
"""
idfy_rest_client.models.person_navn_adresse
This file was automatically generated for Idfy by APIMATIC v2.0 ( https://apimatic.io )
"""
from idfy_rest_client.api_helper import APIHelper
class PersonNavnAdresse(object):
"""Implementation of the 'Person.NavnAdresse' model.
TODO: type model description here.
Attributes:
status_field (string): TODO: type description here.
status_dato_field (datetime): TODO: type description here.
fodselsdato_field (datetime): TODO: type description here.
navn_field (string): TODO: type description here.
adresse_field (string): TODO: type description here.
postnr_field (string): TODO: type description here.
poststed_field (string): TODO: type description here.
kommune_field (string): TODO: type description here.
fylke_field (string): TODO: type description here.
alder_field (int): TODO: type description here.
kjonn_field (string): TODO: type description here.
telefon_field (list of string): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"status_field":'statusField',
"status_dato_field":'statusDatoField',
"fodselsdato_field":'fodselsdatoField',
"navn_field":'navnField',
"adresse_field":'adresseField',
"postnr_field":'postnrField',
"poststed_field":'poststedField',
"kommune_field":'kommuneField',
"fylke_field":'fylkeField',
"alder_field":'alderField',
"kjonn_field":'kjonnField',
"telefon_field":'telefonField'
}
def __init__(self,
status_field=None,
status_dato_field=None,
fodselsdato_field=None,
navn_field=None,
adresse_field=None,
postnr_field=None,
poststed_field=None,
kommune_field=None,
fylke_field=None,
alder_field=None,
kjonn_field=None,
telefon_field=None,
additional_properties = {}):
"""Constructor for the PersonNavnAdresse class"""
# Initialize members of the class
self.status_field = status_field
self.status_dato_field = APIHelper.RFC3339DateTime(status_dato_field) if status_dato_field else None
self.fodselsdato_field = APIHelper.RFC3339DateTime(fodselsdato_field) if fodselsdato_field else None
self.navn_field = navn_field
self.adresse_field = adresse_field
self.postnr_field = postnr_field
self.poststed_field = poststed_field
self.kommune_field = kommune_field
self.fylke_field = fylke_field
self.alder_field = alder_field
self.kjonn_field = kjonn_field
self.telefon_field = telefon_field
# Add additional model properties to the instance
self.additional_properties = additional_properties
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
status_field = dictionary.get('statusField')
status_dato_field = APIHelper.RFC3339DateTime.from_value(dictionary.get("statusDatoField")).datetime if dictionary.get("statusDatoField") else None
fodselsdato_field = APIHelper.RFC3339DateTime.from_value(dictionary.get("fodselsdatoField")).datetime if dictionary.get("fodselsdatoField") else None
navn_field = dictionary.get('navnField')
adresse_field = dictionary.get('adresseField')
postnr_field = dictionary.get('postnrField')
poststed_field = dictionary.get('poststedField')
kommune_field = dictionary.get('kommuneField')
fylke_field = dictionary.get('fylkeField')
alder_field = dictionary.get('alderField')
kjonn_field = dictionary.get('kjonnField')
telefon_field = dictionary.get('telefonField')
# Clean out expected properties from dictionary
for key in cls._names.values():
if key in dictionary:
del dictionary[key]
# Return an object of this model
return cls(status_field,
status_dato_field,
fodselsdato_field,
navn_field,
adresse_field,
postnr_field,
poststed_field,
kommune_field,
fylke_field,
alder_field,
kjonn_field,
telefon_field,
dictionary)
| ["runes@unipluss.no"] | runes@unipluss.no |

| ec83755d7ceef5096637db15a3827324da7e2c2b | 8311a4bc770d91b802b573b91ccc9c8e03f15123 | /ac/administrative_communication/doctype/assignment_transaction_action/assignment_transaction_action.py | 3c8b30c4d720c9453f81113f640d58aafa2d481c | ["MIT"] | permissive | aymenit2008/ac | d6756d32d291255ae4ba6d08700489b27377890f | 8791d86679e10b57417559fcc9ca149321104845 | refs/heads/main | 2023-03-20T21:21:51.168016 | 2021-02-17T08:55:26 | 2021-02-17T08:55:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Aseel and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class AssignmentTransactionAction(Document):
pass
| ["frappe@ubuntu.vm"] | frappe@ubuntu.vm |

| f115526fdf25a9871453e6e4fb7ace173904b177 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /YXjx9G5uQ4CdYPuB4_9.py | 2d28dd3b028ee0f16f9bafa9b0ccc53ff8f93765 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py |
"""
**Mubashir** needs your help to compare two lists.
First list `lst1` contains some numbers and second list `lst2` contains
**squared values of numbers given in the first list**.
Create a function which takes these two lists and returns `True` if all square
values are available, `False` otherwise.
lst1 = [121, 144, 19, 161, 19, 144, 19, 11]
lst2 = [121, 14641, 20736, 361, 25921, 361, 20736, 361]
Returns `True` because **121 is square of 11, 14641 is square of 121, 20736 is
square of 144, 361 is square of 19, 25921 is the square of 161, and so on...**
lst1 = [121, 144, 19, 161, 19, 144, 19, 11]
lst2 = [11*11, 121*121, 144*144, 19*19, 161*161, 19*19, 144*144, 19*19]
### Examples
simple_comp([121, 144, 19, 161, 19, 144, 19, 11], [121, 14641, 20736, 361, 25921, 361, 20736, 361]) ➞ True
simple_comp([4, 4], [1, 31]) ➞ False
simple_comp([2, 2, 3], [4, 4, 9]) ➞ True
### Notes
Numbers can be in any order.
"""
def simple_comp(lst1, lst2):
    if lst1 is None or lst2 is None:
return False
return sorted(lst2) == sorted([i * i for i in lst1])
| ["daniel.reich@danielreichs-MacBook-Pro.local"] | daniel.reich@danielreichs-MacBook-Pro.local |

| 1915d25c38cc211538989aaa0b2bf604b6778c6e | 1619511136a1d861a23f4e8c643916a303ac222c | /apps/blog/models.py | 812adf98991dd66d4cd9c379fbf0b1c81af9a691 | [] | no_license | 2644783865/django2 | 29d00b4faa4169a5969d967e992d22b5285a5817 | 3909bfab85be620d95702eff4db0483b3676ac94 | refs/heads/master | 2020-06-29T00:05:57.622128 | 2019-08-03T08:30:51 | 2019-08-03T08:30:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,911 | py |
from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth import get_user_model
# from django.template.defaultfilters import slugify
# from ckeditor.fields import RichTextField  # does not support file uploads
from ckeditor_uploader.fields import RichTextUploadingField  # supports file uploads
from pyquery import PyQuery as pq  # pip install pyquery; used to pull img src URLs out of HTML
# from pypinyin import lazy_pinyin # pip install pypinyin
from uuslug import slugify # pip install django-uuslug
User = get_user_model()
# Create your models here.
class Source(models.Model):
"""
文章来源
"""
name = models.CharField(max_length=128, default="原创", unique=True, verbose_name="站点名称")
url = models.URLField(max_length=128, blank=True, null=True, verbose_name="url")
time_create = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
class Meta:
verbose_name = "文章来源"
verbose_name_plural = "文章来源列表"
def __str__(self):
return self.name
class Category(models.Model):
"""
节点类别表
"""
name = models.CharField(max_length=128, unique=True, verbose_name="类别名称")
slug = models.SlugField(max_length=128, unique=True, verbose_name="url标识符")
time_create = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(Category, self).save(*args, **kwargs)
class Meta:
verbose_name = "节点类别"
verbose_name_plural = "节点分类列表"
def __str__(self):
return self.name
class Node(models.Model):
"""
节点表
"""
name = models.CharField(max_length=128, unique=True, verbose_name="节点名称")
    # SlugField is a newspaper term (often called a short title). A slug may only contain letters, numbers,
    # underscores, or hyphens; it is typically used as a short label, usually as part of a URL.
    # Like CharField, you can specify max_length (see also the notes there on database portability and
    # max_length). If max_length is not specified, Django defaults it to 50.
    # SlugField sets Field.db_index to True.
    # It is often useful to prepopulate a SlugField automatically from another value; in the admin you can
    # do this with prepopulated_fields.
slug = models.SlugField(max_length=128, unique=True, verbose_name="url标识符")
time_create = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
num_topics = models.IntegerField(default=0, verbose_name="主题数量")
category = models.ForeignKey(Category, on_delete=models.DO_NOTHING, verbose_name="所属类别")
show_status = models.BooleanField(default=True, verbose_name="显示状态")
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(Node, self).save(*args, **kwargs)
class Meta:
verbose_name = "节点"
verbose_name_plural = "节点列表"
def __str__(self):
return self.name
class Tag(models.Model):
"""
文章标签
"""
name = models.CharField(max_length=50, unique=True, verbose_name="标签")
slug = models.SlugField(max_length=128, unique=True, verbose_name="url标识符")
time_create = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(Tag, self).save(*args, **kwargs)
class Meta:
verbose_name = "文章标签"
verbose_name_plural = "文章标签列表"
def __str__(self):
return self.name
class Article(models.Model):
"""
主题表/文章表
"""
title = models.CharField(max_length=128, unique=True, verbose_name="标题")
slug = models.SlugField(max_length=128, unique=True, verbose_name="url标识符")
content = RichTextUploadingField(verbose_name="内容", config_name='awesome_ckeditor')
node = models.ForeignKey(Node, on_delete=models.DO_NOTHING, verbose_name="所属节点")
user = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name="user_article", verbose_name="作者")
source = models.ForeignKey(Source, on_delete=models.DO_NOTHING, verbose_name="来源", blank=True, null=True)
tags = models.ManyToManyField(Tag, verbose_name="标签", related_name="tags_article", blank=True)
num_views = models.IntegerField(default=0, verbose_name="浏览数量")
num_favorites = models.IntegerField(default=0, verbose_name="收藏数量")
last_answerer = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name="last_answerer_article", verbose_name="最后回复者", blank=True,
null=True)
show_status = models.BooleanField(default=True, verbose_name="显示状态")
time_create = models.DateTimeField(auto_now_add=True, verbose_name="发表时间")
time_update = models.DateTimeField(blank=True, null=True, auto_now=True, verbose_name="更新时间")
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
super(Article, self).save(*args, **kwargs)
    # Get the image URL out of the rich-text editor content
    def get_content_img_url(self):
        temp = Article.objects.filter(pk=str(self.id)).values('content')  # values() fetches the content field from the Article table
        html = pq(temp[0]['content'])  # pq() parses the editor's HTML content
        # print(html, "\n", "----")
        img_path = pq(html)('img').attr('src')  # extract the src path from the HTML content
        # print("pic", img_path)
        return img_path  # return the first image's path
class Meta:
verbose_name = "文章"
verbose_name_plural = "文章列表"
def __str__(self):
title_short = self.title if len(self.title) < 15 else self.title[:12] + '...'
return "%s %s %s" % (self.id, self.user, title_short)
class FriendsURL(models.Model):
friend_name = models.CharField(max_length=50, unique=True, verbose_name="用户名称")
friend_image = models.ImageField(max_length=8 * 1024 * 1024 * 5, upload_to="friends", verbose_name="用户头像")
site_name = models.CharField(max_length=50, unique=True, verbose_name="网站名称")
site_link = models.URLField(max_length=256, blank=True, null=True, verbose_name="网站链接")
show_status = models.BooleanField(default=True, verbose_name="显示状态")
time_create = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
time_update = models.DateTimeField(blank=True, null=True, auto_now=True, verbose_name="更新时间")
class Meta:
verbose_name = "友情链接"
verbose_name_plural = "友情链接列表"
def __str__(self):
return self.friend_name
| ["zhuoqun527@qq.com"] | zhuoqun527@qq.com |

| 602eb9121165edb4c2356d3bf98d987ff4c1ae16 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/list_get_element_complex-2.py | 6d104100d4429fa36c29aeb6f7ae3d25c91ca267 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py |
$Definition
def next_int() -> int:
global next
next = next + 1
return next
def make_list() -> [int]:
return [next_int(), next_int(), next_int()]
print(make_list()[next_int() - 3])
| ["647530+Virtlink@users.noreply.github.com"] | 647530+Virtlink@users.noreply.github.com |

| 2a225dddd9fc8c12c4f1e8857ebbcc1a6a4cc4fc | e6e65a6704c20e6e0288cfc54915ee7ea9e1c0a7 | /1recon/basicVersions/1pipeTrans/networks/modifiedVGG.py | ef58d51640d13f96c41606581d362c196412dd93 | [] | no_license | schatzkara/REU2019 | fbb1f17d860c5d51a7ccae3ba106960d4c733949 | 6de28b5a8992f6122f2e9813de8b92d9e97ccbf3 | refs/heads/master | 2020-06-06T03:50:40.753334 | 2019-11-07T14:11:50 | 2019-11-07T14:11:50 | 192,629,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,172 | py |
# phase 3
# modified from https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
import torch
import torch.nn as nn
from torchsummary import summary
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
class VGG(nn.Module):
"""
Class representing the modified VGG network to be used.
"""
def __init__(self, features, num_classes=1000, init_weights=True,
pretrained=False, weights_path=''):
"""
Initializes the modified VGG network.
:param features: All the network layers.
:param num_classes: (int) The number of classes used for classification.
:param init_weights: (bool) True if the network weights should be initialized; False otherwise.
:param pretrained: (bool) True if the network should be pretrained; False otherwise.
        :param weights_path: (str) The path at which the pretrained weights are located.
"""
super(VGG, self).__init__()
self.features = features
if init_weights:
self._initialize_weights()
if pretrained:
self.load_weights(weights_path=weights_path)
def load_weights(self, weights_path):
state_dict = torch.load(weights_path)
bad_weights = ["features.17", "features.19", "features.21", "features.24",
"features.26", "features.28", "classifier.0", "classifier.3",
"classifier.6"]
new_state_dict = {}
for key, weight in state_dict.items():
first_per = key.index('.')
second_per = key[first_per + 1:].index('.')
id_ = key[:first_per + second_per + 1]
if id_ not in bad_weights:
new_state_dict[key] = weight
self.load_state_dict(new_state_dict)
def forward(self, x):
self.features = self.features
x = self.features(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, num_layers=None, batch_norm=False):
if num_layers is not None:
cfg = cfg[:num_layers]
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
# print('Modified VGG Model Successfully Built \n')
return nn.Sequential(*layers)
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512], # , 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512], # , 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512], # , 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512], # , 'M'],
}
num_layers_to_use = {
'A': 7,
'B': 9,
'D': 10,
'E': 11,
}
def _vgg(arch, cfg, batch_norm, pretrained, progress, weights_path='', **kwargs):
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfgs[cfg], num_layers=num_layers_to_use[cfg], batch_norm=batch_norm),
pretrained=pretrained, weights_path=weights_path, **kwargs)
return model
def vgg11(pretrained=False, progress=True, **kwargs):
"""VGG 11-layer generator (configuration "A")
Args:
pretrained (bool): If True, returns a generator pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)
def vgg11_bn(pretrained=False, progress=True, **kwargs):
"""VGG 11-layer generator (configuration "A") with batch normalization
Args:
pretrained (bool): If True, returns a generator pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)
def vgg13(pretrained=False, progress=True, **kwargs):
"""VGG 13-layer generator (configuration "B")
Args:
pretrained (bool): If True, returns a generator pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)
def vgg13_bn(pretrained=False, progress=True, **kwargs):
"""VGG 13-layer generator (configuration "B") with batch normalization
Args:
pretrained (bool): If True, returns a generator pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)
def vgg16(pretrained=False, progress=True, weights_path='', **kwargs):
"""VGG 16-layer generator (configuration "D")
Args:
pretrained (bool): If True, returns a generator pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16', 'D', False, pretrained=pretrained, progress=progress, weights_path=weights_path, **kwargs)
def vgg16_bn(pretrained=False, progress=True, **kwargs):
"""VGG 16-layer generator (configuration "D") with batch normalization
Args:
pretrained (bool): If True, returns a generator pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)
def vgg19(pretrained=False, progress=True, **kwargs):
"""VGG 19-layer generator (configuration "E")
Args:
pretrained (bool): If True, returns a generator pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)
def vgg19_bn(pretrained=False, progress=True, **kwargs):
"""VGG 19-layer generator (configuration 'E') with batch normalization
Args:
pretrained (bool): If True, returns a generator pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
if __name__ == "__main__":
print_summary = True
vgg = vgg16()
if print_summary:
summary(vgg, input_size=(3, 112, 112))
| ["36019128+schatzkara@users.noreply.github.com"] | 36019128+schatzkara@users.noreply.github.com |

| 39874ed6f673a12d3460b09f8f3745e4cbb3f6ed | 65890d9024b035d997c78a3968c05908df81a9c4 | /sharpy/managers/extensions/__init__.py | 6728d67326888cd4880b83ceb0fe8f4bbd438321 | ["MIT"] | permissive | lladdy/sharpy-sc2 | df43a266aa2ad90633b9a9f279fc14529740f30f | cf6f7850add6f8d33d6e7c5ccaf2a619e4838c52 | refs/heads/develop | 2023-08-03T17:49:51.939649 | 2023-07-31T10:04:34 | 2023-07-31T10:04:34 | 227,027,455 | 0 | 0 | MIT | 2023-08-30T08:39:00 | 2019-12-10T04:15:24 | Python | UTF-8 | Python | false | false | 428 | py |
from .enemy_army_predicter import EnemyArmyPredicter
from .build_detector import BuildDetector
from .game_analyzer import GameAnalyzer
from .data_manager import DataManager
from .chat_manager import ChatManager
from .memory_manager import MemoryManager
from .archon import ArchonManager
from .heat_map import HeatMapManager
from .custom_func_manager import CustomFuncManager
from .enemy_vision_manager import EnemyVisionManager
| ["aki.vanttinen@sedgestudios.com"] | aki.vanttinen@sedgestudios.com |

| 8a42f484b703ca5e6d8bacf2c9fbedaa62340aff | c380976b7c59dadaccabacf6b541124c967d2b5a | /.history/src/data/data_20191028083133.py | 1bf93266354860aafa16ec794d7068be824878af | ["MIT"] | permissive | bkraft4257/kaggle_titanic | b83603563b4a3c995b631e8142fe72e1730a0e2e | f29ea1773773109a867278c001dbd21a9f7b21dd | refs/heads/master | 2020-08-17T12:45:28.653402 | 2019-11-15T16:20:04 | 2019-11-15T16:20:04 | 215,667,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,814 | py |
import pandas as pd
import numpy as np
from typing import Union
from pathlib import Path
from nameparser import HumanName
class ExtractData:
def __init__(self, filename: Union[str, Path], age_bins=None, drop_columns=None):
# """Extract Training Data from file or Path
# Arguments:
# filename {[str]} -- Filename of CSV data file containing data.
# drop_columns -- Columns in dataframe that should be dropped.
# """
if drop_columns is None:
drop_columns = ["age", "cabin", "name", "ticket"]
self.filename = filename
self.drop_columns = drop_columns
self.all_label_columns = ["survived"]
self.all_feature_columns = [
"pclass",
"name",
"sex",
"age",
"sibsp",
"parch",
"ticket",
"fare",
"cabin",
"embarked",
]
self.Xy_raw = None
self.extract_raw()
def extract_raw(self):
"""
Extracts data from a CSV file.
Returns:
pd.DataFrame -- [description]
"""
Xy_raw = pd.read_csv(self.filename)
Xy_raw.columns = Xy_raw.columns.str.lower().str.replace(" ", "_")
Xy_raw = Xy_raw.rename(columns={"age": "age_known"})
Xy_raw["pclass"] = Xy_raw["pclass"].astype("category")
self.Xy_raw = Xy_raw.set_index("passengerid")
class TransformData:
# Only one passenger with title Lady. She was traveling with a sibling and no husband. Set title to Miss
# 2 Mlle and 1 Mme. All 3 were 24 years old and travelling alone. Retitled as Miss.
# 1 Sir. Male 49 years old. Travelling with a sibling.
# Revs were all males.
# 8 Drs. (7 male, 1 female) changed to Mr. and Mrs. respectively.
title_translator = {
"Mlle.": "Miss.",
"Mme.": "Miss.",
"Sir.": "Mr.",
"Ms.": "Mrs.",
"Rev.": "Mr.",
"Col.": "Mr.",
"Capt.": "Mr.",
"Lady.": "Miss.",
"the Countess. of": "Mrs.",
"Dr.":np.nan,
}
def __init__(
self,
raw_data,
adult_age_threshold_min=13,
age_bins=None,
fare_mode=None,
embarked_mode=None,
Xy_age_estimate=None,
drop_columns=None,
):
# """Extract Training Data from file or Path
# Arguments:
# filename {[str]} -- Filename of CSV data file containing data.
# drop_columns -- Columns in dataframe that should be dropped.
# """
if age_bins is None:
age_bins = [0, 10, 20, 30, 40, 50, 60, np.inf]
if drop_columns is None:
drop_columns = ["age", "cabin", "name", "ticket"]
self.raw = raw_data
self.adult_age_threshold_min = adult_age_threshold_min
self.Xy_age_estimate = Xy_age_estimate
self.age_bins = age_bins
self.Xy = self.raw.Xy_raw.copy()
if fare_mode is None:
fare_mode = self.Xy["fare"].mode()[0]
if embarked_mode is None:
embarked_mode = self.Xy["embarked"].mode()[0]
self.fare_mode = fare_mode
self.embarked_mode = embarked_mode
self.impute_missing_fare()
self.impute_missing_embarked()
self.extract_title()
self.extract_last_name()
self.extract_cabin_number()
self.extract_cabin_prefix()
self.estimate_age()
self.calc_age_bins()
self.calc_is_child()
self.calc_is_travelling_alone()
def calc_is_travelling_alone(self):
"""Create Boolean feature if passenger is travelling alone. (True=Traveling alone, False=Traveling in group)
"""
self.Xy["is_travelling_alone"] = (self.Xy.sibsp == 0) & (self.Xy.parch == 0)
def calc_is_child(self):
"""Calculate Boolean feature if passenger is a child as determined by the self.adult_age_threshold_min
"""
self.Xy["is_child"] = self.Xy.age < self.adult_age_threshold_min
def extract_cabin_number(self):
"""
Extracts cabin number from ticket.
"""
self.Xy["cabin_number"] = self.Xy.ticket.str.extract("(\d+)$")
def extract_cabin_prefix(self):
"""Extracts cabin prefix from ticket.
"""
self.Xy["cabin_prefix"] = self.Xy.ticket.str.extract("^(.+) ")
def extract_title(self):
"""Extract title from the name using nameparser.
If the Title is empty then we will fill the title with either Mr or Mrs depending upon the sex. This
is adequate for the train and holdout data sets. The title being empty only occurs for passenger 1306
in the holdout data set. A more appropriate way to do this is to check on the sex and age to correctly
assign the title
"""
title = (self.Xy.name.apply(lambda x: HumanName(x).title)
.replace(self.title_translator)
.replace({"\.": ""}, regex=True)
.replace({"":np.nan})
.fillna(self.Xy['sex'])
.replace({'female':'Mrs', 'male':'Mr'})
)
self.Xy["title"] = title
def extract_last_name(self):
"Extracts last name from "
self.Xy["last_name"] = self.Xy.name.apply(lambda x: HumanName(x).last)
def calc_age_bins(self):
self.Xy["age_bin"] = pd.cut(
self.Xy.age, bins=[0, 10, 20, 30, 40, 50, 60, np.inf]
)
def clean(self,):
"""Clean data to remove missing data and "unnecessary" features.
Arguments:
in_raw_df {pd.DataFrame} -- Dataframe containing all columns and rows Kaggle Titanic Training Data set
"""
self.Xy = self.Xy_raw.drop(self.drop_columns, axis=1)
def estimate_age(self, groupby_columns=["sex", "title"]):
"""[summary]
Keyword Arguments:
groupby {list} -- [description] (default: {['sex','title']})
"""
if self.Xy_age_estimate is None:
self.Xy_age_estimate = (
self.Xy.groupby(groupby_columns).age_known.mean().to_frame().round(1)
)
self.Xy_age_estimate = self.Xy_age_estimate.rename(
columns={"age_known": "age_estimate"}
)
out_df = (
self.Xy.reset_index()
.merge(self.Xy_age_estimate, on=groupby_columns)
.set_index("passengerid")
)
out_df["age"] = out_df["age_known"].fillna(out_df["age_estimate"])
self.Xy = out_df
def impute_missing_fare(self):
self.Xy["fare"] = self.Xy["fare"].fillna(self.fare_mode)
def impute_missing_embarked(self):
self.Xy["embarked"] = self.Xy["embarked"].fillna(self.embarked_mode)
| ["bob.kraft@infiniteleap.net"] | bob.kraft@infiniteleap.net |

| e13f315fcdd9f188936c67487b7406fc615608c7 | 0add67e1d0c2915caf84c3af5151ca68d9bb5682 | /API_PROJ/asgi.py | 6dbdaac532933c67632230a99da41926e98002b0 | [] | no_license | kamran1231/MOVIE_REST_API | 8b30d9b4ecacd788982a9da35046f9b159037c3e | 677418f94225b60cc486e8027dd381e7d5549239 | refs/heads/master | 2023-05-31T23:17:25.761222 | 2021-06-29T12:20:05 | 2021-06-29T12:20:05 | 380,045,682 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py |
"""
ASGI config for API_PROJ project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'API_PROJ.settings')
application = get_asgi_application()
| ["khanbrother805@gmail.com"] | khanbrother805@gmail.com |

| 9596ca94cc905548e8c6637fbd52ec54c10719db | ce79d8a92ddc88ee17ccbfbab273fdb37600da0e | /0140_Word_Break_II.py | 8afc6e7874b25d36b55e63afeae431ff16b40a41 | ["MIT"] | permissive | coldmanck/leetcode-python | 5bf5bc489213a5835acc93b047e1b0ff7a1392bc | fd4cf122cfd4920f3bd8dce40ba7487a170a1b57 | refs/heads/master | 2023-06-09T02:30:49.681803 | 2023-05-27T04:01:57 | 2023-05-27T04:01:57 | 249,945,225 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 553 | py |
from typing import List  # needed when running outside the LeetCode judge

class Solution:
    def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:
memo = {len(s): ['']}
def sentences(i):
if i in memo:
return memo[i]
memo[i] = []
for j in range(i + 1, len(s) + 1):
if s[i:j] in wordDict:
for tail in sentences(j):
tail_ans = tail if tail == '' else ' ' + tail
memo[i].append(s[i:j] + tail_ans)
return memo[i]
ans = sentences(0)
return ans
| ["coldmanck@gmail.com"] | coldmanck@gmail.com |

| 006a2323f3e2a8bfdf5efcc2e45813474d8a20a5 | 5a7abc4537039860c49e9a80219efa759aad1b6f | /tests/providers/aws/services/trustedadvisor/trustedadvisor_errors_and_warnings/trustedadvisor_errors_and_warnings_test.py | e484ead322db370fdda3ad0f146d93ad57f31f8b | ["Apache-2.0"] | permissive | sec-js/prowler | d5a06c72f5d7e490bade1167966f83f7a5d7ed15 | f72be9a1e492ad593c9ac267d3ca07f626263ccd | refs/heads/master | 2023-08-31T22:48:33.983360 | 2022-12-22T16:02:28 | 2022-12-22T16:02:28 | 243,866,744 | 0 | 0 | Apache-2.0 | 2022-12-23T12:23:20 | 2020-02-28T22:37:02 | Python | UTF-8 | Python | false | false | 2,973 | py |
from re import search
from unittest import mock
from uuid import uuid4
from prowler.providers.aws.services.trustedadvisor.trustedadvisor_service import Check
AWS_REGION = "eu-west-1"
AWS_ACCOUNT_NUMBER = "123456789012"
detector_id = str(uuid4())
class Test_trustedadvisor_errors_and_warnings:
def test_no_detectors(self):
trustedadvisor_client = mock.MagicMock
trustedadvisor_client.checks = []
with mock.patch(
"prowler.providers.aws.services.trustedadvisor.trustedadvisor_service.TrustedAdvisor",
trustedadvisor_client,
):
from prowler.providers.aws.services.trustedadvisor.trustedadvisor_errors_and_warnings.trustedadvisor_errors_and_warnings import (
trustedadvisor_errors_and_warnings,
)
check = trustedadvisor_errors_and_warnings()
result = check.execute()
assert len(result) == 0
def test_trustedadvisor_all_passed_checks(self):
trustedadvisor_client = mock.MagicMock
trustedadvisor_client.checks = []
trustedadvisor_client.checks.append(
Check(
id="check1",
name="check1",
region=AWS_REGION,
status="ok",
)
)
with mock.patch(
"prowler.providers.aws.services.trustedadvisor.trustedadvisor_service.TrustedAdvisor",
trustedadvisor_client,
):
from prowler.providers.aws.services.trustedadvisor.trustedadvisor_errors_and_warnings.trustedadvisor_errors_and_warnings import (
trustedadvisor_errors_and_warnings,
)
check = trustedadvisor_errors_and_warnings()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert search("ok", result[0].status_extended)
assert result[0].resource_id == "check1"
def test_trustedadvisor_error_check(self):
trustedadvisor_client = mock.MagicMock
trustedadvisor_client.checks = []
trustedadvisor_client.checks.append(
Check(
id="check1",
name="check1",
region=AWS_REGION,
status="error",
)
)
with mock.patch(
"prowler.providers.aws.services.trustedadvisor.trustedadvisor_service.TrustedAdvisor",
trustedadvisor_client,
):
from prowler.providers.aws.services.trustedadvisor.trustedadvisor_errors_and_warnings.trustedadvisor_errors_and_warnings import (
trustedadvisor_errors_and_warnings,
)
check = trustedadvisor_errors_and_warnings()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert search("error", result[0].status_extended)
assert result[0].resource_id == "check1"
| ["noreply@github.com"] | sec-js.noreply@github.com |

| 6bc17ae0f3d584cafcfcbc9be7eb61a854ffef96 | d047fed56a7d1de1d7c32ce83b8d62646fa7d19e | /average_kitne_hai.py | 65f5b01aaddc83e61a33bb02bfb1e308241e38a0 | [] | no_license | shantinavgurukul/listQuestions | 508b6bd489731d5b8a9ba1a27e5b88b1bb27341a | 21f413f65b374e5fa63e0366591895757146d7c7 | refs/heads/master | 2022-11-23T07:56:15.392836 | 2020-08-02T03:51:46 | 2020-08-02T03:51:46 | 284,384,225 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | py |
# elements = [23, 14, 56, 12, 19, 9, 15, 25, 31, 42, 43]
# index=0
# # average=0
# list1=[]
# list2=[]
# while(index<len(elements)):
# if(elements[index]%2!=0):
# list1.append(elements[index])
# # list1=list1+1
# average=elements[index]//7
# else:
# list2.append(elements[index])
# # list2=list2+1
# average=elements[index]//4
# index=index+1
# print("even number is:",list1)
# print(average)
# print("odd number is:",list2)
# print(average)
elements = [23, 14, 56, 12, 19, 9, 15, 25, 31, 42, 43]
# even=0
# odd=0
# esum1=0
# osum2=0
# index=0
# while index<len(elements):
# if elements[index]%2==0:
# even=even+1
# esum1=esum1+elements[index]
# else:
# odd=odd+1
# osum2=osum2+elements[index]
# index=index+1
# avarge1=esum1/even
# avarge2=osum2/odd
# print(esum1)
# print(osum2)
# print(even)
# print(odd)
# print(avarge1)
# print(avarge2)
index=0
evensum=0
oddsum=0
evencount=0
oddcount=0
while(index<len(elements)):
if(elements[index]%2==0):
evensum=evensum+elements[index]
evencount=evencount+1
else:
oddsum=oddsum+elements[index]
oddcount=oddcount+1
index=index+1
evenAverage=evensum//evencount
oddAverage=oddsum//oddcount
print(evensum)
print(oddsum)
print(evenAverage)
print(oddAverage)
| ["you@example.com"] | you@example.com |

| e574432f721c510f0ea06c7b99ee335b99f78d75 | 091e97bcfe5acc0635bd601aa8497e377b74d41a | /openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py | ddecf9cc6017fc3b0177a74857c0523c116be0e6 | ["LicenseRef-scancode-warranty-disclaimer", "Apache-2.0"] | permissive | openshift/openshift-tools | d59b63778f25cb8fb3c7a0253afe22a173e72f9d | e342f6659a4ef1a188ff403e2fc6b06ac6d119c7 | refs/heads/prod | 2023-08-30T01:52:04.108978 | 2022-03-23T21:07:28 | 2022-03-23T21:07:28 | 36,827,699 | 170 | 254 | Apache-2.0 | 2022-06-16T12:11:51 | 2015-06-03T20:09:22 | Python | UTF-8 | Python | false | false | 3,662 | py |
# pylint: disable=missing-docstring
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
# pylint: disable=too-many-branches,too-many-statements,too-many-arguments
def run(self, terms, variables=None, regions_enabled=True, short_version=None,
**kwargs):
predicates = []
if short_version is None:
if 'openshift_release' in variables:
release = variables['openshift_release']
if release.startswith('v'):
short_version = release[1:]
else:
short_version = release
short_version = '.'.join(short_version.split('.')[0:2])
elif 'openshift_version' in variables:
version = variables['openshift_version']
short_version = '.'.join(version.split('.')[0:2])
else:
# pylint: disable=line-too-long
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if short_version not in ['3.6', '3.7', '3.8', '3.9', '3.10', '3.11', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
if short_version == 'latest':
short_version = '3.11'
# Predicates ordered according to OpenShift Origin source:
# origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
if short_version in ['3.6']:
predicates.extend([
{'name': 'NoVolumeZoneConflict'},
{'name': 'MaxEBSVolumeCount'},
{'name': 'MaxGCEPDVolumeCount'},
{'name': 'MatchInterPodAffinity'},
{'name': 'NoDiskConflict'},
{'name': 'GeneralPredicates'},
{'name': 'PodToleratesNodeTaints'},
{'name': 'CheckNodeMemoryPressure'},
{'name': 'CheckNodeDiskPressure'},
])
if short_version in ['3.7', '3.8']:
predicates.extend([
{'name': 'NoVolumeZoneConflict'},
{'name': 'MaxEBSVolumeCount'},
{'name': 'MaxGCEPDVolumeCount'},
{'name': 'MaxAzureDiskVolumeCount'},
{'name': 'MatchInterPodAffinity'},
{'name': 'NoDiskConflict'},
{'name': 'GeneralPredicates'},
{'name': 'PodToleratesNodeTaints'},
{'name': 'CheckNodeMemoryPressure'},
{'name': 'CheckNodeDiskPressure'},
{'name': 'NoVolumeNodeConflict'},
])
if short_version in ['3.9', '3.10', '3.11']:
predicates.extend([
{'name': 'NoVolumeZoneConflict'},
{'name': 'MaxEBSVolumeCount'},
{'name': 'MaxGCEPDVolumeCount'},
{'name': 'MaxAzureDiskVolumeCount'},
{'name': 'MatchInterPodAffinity'},
{'name': 'NoDiskConflict'},
{'name': 'GeneralPredicates'},
{'name': 'PodToleratesNodeTaints'},
{'name': 'CheckNodeMemoryPressure'},
{'name': 'CheckNodeDiskPressure'},
{'name': 'CheckVolumeBinding'},
])
if regions_enabled:
region_predicate = {
'name': 'Region',
'argument': {
'serviceAffinity': {
'labels': ['region']
}
}
}
predicates.append(region_predicate)
return predicates
| ["mwoodson@redhat.com"] | mwoodson@redhat.com |

| d48d8dcf7839f5f1eb56a0f65558ff6462b25843 | e18a8c8ed113d51d99ae942204016a883925163d | /25stringchains/trial1.py | 2f432bcc4243c495265c72aa4e256c91f3b7b115 | [] | no_license | saurabh11baghel/dsalgo | e385fc7739c59e8a78a4f7e30d3cedfdfa4eac06 | 88a108bc3ce7ec95ffad8d95079260a2fc5f6e12 | refs/heads/master | 2021-04-30T08:36:56.312035 | 2017-02-20T22:32:40 | 2017-02-20T22:32:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 29 19:04:08 2017
@author: abgoswam
"""
#_words_cnt = int(raw_input().strip())
#_words_i=0
#_words = []
#
#while _words_i < _words_cnt:
# _words_item = raw_input().strip()
# _words.append(_words_item)
# _words_i += 1
#
#print(_words)
#def longestChain(words):
#words = ['a', 'b', 'ba', 'bca', 'bdca', 'bda']
words = ['ab', 'abc']
#if words is None or len(words) <= 0:
# return 0
words_sorted = sorted(words, key=lambda x: len(x))
chain = {}
for s in words_sorted:
print("word 's' : {0}".format(s))
if len(s) == 1:
chain[s] = 1
else:
# iterate over the characters in s
_m = 0
for i in range(len(s)):
s_prime = (s[:i] + s[i+1:])
print("word 's_prime' : {0}".format(s_prime))
if s_prime in chain:
_m = max(_m, chain[s_prime])
if _m > 0:
_m += 1
chain[s] = _m
argmax_s = max(chain, key=lambda i:chain[i])
#return chain[argmax_s]
#words = ['a', 'b', 'ba', 'bca', 'bdca', 'bda']
##words = ['ab', 'ba']
#print(longestChain(words))
| ["abgoswam@gmail.com"] | abgoswam@gmail.com |

| ea66726c00ea40930cc3627462df3a3a9bac9593 | de470f0bad289ab9e8633a4527a2bf4c14a6b2d9 | /manage-cli/get_sql.py | db028fd8f72b46fd3783c4f3d499549155512d80 | [] | no_license | DingGuodong/kissops | f1a22e557ae6b1ea4366d88ffceb157fb0bb5450 | 06ca11d2082d4d7ae88681fc0456a7502d134b27 | refs/heads/master | 2021-01-11T11:02:33.583667 | 2018-04-17T08:20:06 | 2018-04-17T08:20:06 | 72,810,369 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py |
#!/usr/bin/python
# encoding: utf-8
# -*- coding: utf8 -*-
"""
Created by PyCharm.
File: LinuxBashShellScriptForOps:get_sql.py
User: Guodong
Create Date: 2017/8/23
Create Time: 16:01
Description:
References: http://luozhaoyu.iteye.com/blog/1510635
"""
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.chdir(BASE_DIR)
try:
app_name = raw_input("What app name would you like get SQL?\n")
os.system("python manage.py dbshell {name}".format(name=app_name))
except OSError as e:
print e.message
sys.exit(1)
| ["uberurey_ups@163.com"] | uberurey_ups@163.com |

| 3d99cb9d17223816aa55786f68bb96c5ee9f812a | 8c917dc4810e2dddf7d3902146280a67412c65ea | /v_7/Dongola/common/account_asset_custom/wizard/account_data_migration.py | d16a9ffe00724622ae063143e33836fc790a1d27 | [] | no_license | musabahmed/baba | d0906e03c1bbd222d3950f521533f3874434b993 | 0b997095c260d58b026440967fea3a202bef7efb | refs/heads/master | 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,631 | py |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv, orm
from tools.translate import _
class account_post_move(osv.osv_memory):
"""
Account move line reconcile wizard, it checks for the write off the reconcile entry or directly reconcile.
"""
_name = 'account.post.move'
_columns = {
'move_date': fields.date('Move date', required=True),
'journal_id': fields.many2one('account.journal', 'Journal', required=True),
'init_account':fields.many2one('account.account', 'Initial Account'),
'reval_account':fields.many2one('account.account', 'Revalue Account'),
}
def trans_rec_reconcile_full(self, cr, uid, ids, context=None):
"""Method to post data migration to the asset by:
creating new asset,
create new opertion of initial then calling post method
create new opertion of revalue then calling post method.
@return: True
"""
if context is None:
context = {}
context.update({'group':True})
account_data = self.pool.get('account.data.move')
location_obj=self.pool.get('account.asset.location')
category_obj=self.pool.get('account.asset.category')
asset_obj=self.pool.get('account.asset.asset')
history_obj=self.pool.get('account.asset.history')
depreciation_line_obj=self.pool.get('account.asset.depreciation.line')
period_obj = self.pool.get('account.period')
wiz_obj = self.browse(cr, uid, ids, context)[0]
asset_ids = []
dprc_line_ids=[]
context.update({'company_id':wiz_obj.journal_id.company_id.id})
pids = period_obj.find(cr, uid, wiz_obj.move_date, context=context)
if not pids:
raise osv.except_osv(_('Warning !'), _('Check the date'))
if 'active_ids' in context and context['active_ids']:
data = account_data.browse(cr, uid, context['active_ids'], context)
for rec in data:
cat_id = category_obj.search(cr, uid, [('code', '=', rec.categ_code), ('company_id','=',wiz_obj.journal_id.company_id.id) ],context=context)
loc_id = location_obj.search(cr, uid, [('code', '=', rec.location_code) ],context=context)
if not loc_id:
account_data.write(cr, uid, rec.id, {'status': 'No location'}, context=context)
continue
if location_obj.browse(cr, uid, loc_id, context)[0].company_id.id != wiz_obj.journal_id.company_id.id:
account_data.write(cr, uid, rec.id, {'status': 'Different company and asset location Journals'}, context=context)
continue
if not cat_id:
account_data.write(cr, uid, rec.id, {'status': 'No category'}, context=context)
continue
if rec.book_value < 0 :
account_data.write(cr, uid, rec.id, {'status': 'Book value less than zero'}, context=context)
continue
asset_id = asset_obj.create(cr,uid,{
'name':rec.description,
'category_id':cat_id[0],
'date_start': rec.comm_date ,
'period_id':pids,
'quantity':rec.quantity,
'location':loc_id},context)
asset_ids.append(int(asset_id))
history_id=history_obj.create(cr,uid,{
'type':'initial',
'name':rec.description,
'quantity':rec.quantity,
'amount': rec.book_value ,
'account_id':wiz_obj.init_account.id,
'user_id':uid,
'date': wiz_obj.move_date,
'period_id': pids[0],
'asset_id':asset_id,
},context)
history_obj.create_operation_move(cr,uid,[history_id],context)
if rec.revalue_amount > 0:
history_id=history_obj.create(cr,uid,{
'type':'reval',
'name':rec.description,
'quantity':rec.quantity,
'amount': rec.revalue_amount ,
'account_id':wiz_obj.reval_account.id,
'user_id':uid,
'date': wiz_obj.move_date,
'period_id': pids[0],
'asset_id':asset_id,
},context)
history_obj.create_operation_move(cr,uid,[history_id],context)
asset_obj.validate(cr,uid,[asset_id],context)
if rec.total_depreciation > 0:
dprc_line_id=depreciation_line_obj.create(cr, uid,{'amount':rec.total_depreciation,
'name':rec.description,
'asset_id':asset_id,
'sequence':asset_id,
'depreciated_value':0.0,
'depreciation_date':wiz_obj.move_date,
'remaining_value':rec.book_value-rec.total_depreciation,
},context)
dprc_line_ids.append(dprc_line_id)
if asset_ids:
depreciation_line_obj.create_move( cr, uid, dprc_line_ids, context={})
asset_obj.compute_depreciation_board(cr,uid,asset_ids,context)
cr.execute('delete FROM account_data_move WHERE id = %s ', (rec.id,))
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| ["bakry@exp-sa.com"] | bakry@exp-sa.com |

| ed96c6be42b15bb8c1698da40cf12109cdc094d7 | 6440a113597191d3b78aa6b5cae6cea9fb057e2e | /Daily-Grind/73.py | 5c3188a2d7ef2ce664500cd3404c4e1039778d18 | [] | no_license | DarshanGowda0/LC-Grind | 40c162d8894df81ea7124f66daf20f86f327b6cb | f7b9a86797d52ab1057f0300352c0c5670a59bd5 | refs/heads/master | 2023-01-28T01:27:45.195164 | 2020-12-06T03:58:14 | 2020-12-06T03:58:14 | 277,024,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
from typing import List  # needed when running outside the LeetCode judge
class Solution:
def levelOrder(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
que = deque([root])
ans = []
while que:
res = []
for _ in range(len(que)):
node = que.popleft()
res.append(node.val)
if node.left:
que.append(node.left)
if node.right:
que.append(node.right)
ans.append(res)
return ans
| ["darshan.gowda008@gmail.com"] | darshan.gowda008@gmail.com |

| da1ef01eb25bcea1e1925a60a6dd48a073d52c65 | a1232023595eed48bf3d56c0c1dcb8f05cdc261a | /204. Count Primes/204.py | 3ebfd940c7ef3e38ddb147a2bdf0c8607730b934 | [] | no_license | NKcell/leetcode | 2393ec3f8dc0e26b9ff098a592e4ffa9d7b774b8 | 88dec1c2106950e82819a0dd16425a9ee8fdaca4 | refs/heads/master | 2020-07-24T05:27:42.269903 | 2020-05-06T07:45:07 | 2020-05-06T07:45:07 | 207,814,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py |
"""
Times out (TLE):
def countPrimes(n):
if n < 3:
return 0
if n == 3:
return 1
primes = [2]
re = 1
for i in range(2,n):
flag = 0
for j in primes:
if i%j == 0:
flag = 1
break
if flag == 0:
re += 1
primes.append(i)
return re
"""
def countPrimes(n):
primes = [True]*n
if n < 3:
return 0
primes[0] = False
primes[1] = False
for i in range(2,int(n**0.5) + 1):
if primes[i]:
for j in range(i*i,n,i):
primes[j] = False
return sum(primes)
print(countPrimes(3))
"""
class Solution:
# @param {integer} n
# @return {integer}
def countPrimes(self, n):
if n < 3:
return 0
primes = [True] * n
primes[0] = primes[1] = False
for i in range(2, int(n ** 0.5) + 1):
if primes[i]:
primes[i * i: n: i] = [False] * len(primes[i * i: n: i])
return sum(primes)
"""
| ["517343690@qq.com"] | 517343690@qq.com |

| 996034a8186f4a0272c75010ab385800ae034cad | 9e2d79a2cf1dbeaffe8ef897bb53f94af8b5b68c | /ichnaea/api/locate/tests/test_constants.py | 476eaa06eae7fec95ad3c105304313df9d89e7f8 | ["Apache-2.0"] | permissive | amolk4games/ichnaea | a7d1cbd12b6aa5c0d877fca380080b08fcff24b8 | 907c542da05b428c8e994bce1537390e22b3ca58 | refs/heads/master | 2021-01-19T07:21:54.851167 | 2016-04-08T15:20:37 | 2016-04-08T15:21:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,359 | py |
from ichnaea.api.locate.constants import DataAccuracy
from ichnaea.tests.base import TestCase
class DataAccuracyTest(TestCase):
def test_compare(self):
self.assertTrue(DataAccuracy.high < DataAccuracy.medium)
self.assertTrue(DataAccuracy.high < DataAccuracy.low)
self.assertTrue(DataAccuracy.medium < DataAccuracy.low)
self.assertTrue(DataAccuracy.medium != DataAccuracy.high)
self.assertTrue(DataAccuracy.low == DataAccuracy.low)
self.assertTrue(DataAccuracy.low < DataAccuracy.none)
self.assertFalse(DataAccuracy.none == 'ab')
def test_compare_number(self):
self.assertTrue(DataAccuracy.none == float('inf'))
self.assertTrue(DataAccuracy.low > 50000)
self.assertTrue(DataAccuracy.low > 50000.0)
self.assertTrue(DataAccuracy.medium == 50000)
self.assertTrue(DataAccuracy.medium >= 50000.0)
self.assertTrue(DataAccuracy.medium <= 50000)
self.assertFalse(DataAccuracy.medium != 50000.0)
self.assertTrue(500.0 <= DataAccuracy.high)
self.assertFalse(1000.1 <= DataAccuracy.high)
def test_uncomparable(self):
with self.assertRaises(TypeError):
DataAccuracy.low < object()
with self.assertRaises(TypeError):
DataAccuracy.low >= 'ab'
with self.assertRaises(TypeError):
DataAccuracy.low > DataAccuracy
def test_from_number(self):
self.assertEqual(DataAccuracy.from_number(1), DataAccuracy.high)
self.assertEqual(DataAccuracy.from_number(-0.1), DataAccuracy.high)
self.assertEqual(DataAccuracy.from_number(1000), DataAccuracy.high)
self.assertEqual(DataAccuracy.from_number(1000.1), DataAccuracy.medium)
self.assertEqual(DataAccuracy.from_number(10 ** 5), DataAccuracy.low)
self.assertEqual(DataAccuracy.from_number(10 ** 9), DataAccuracy.none)
with self.assertRaises(TypeError):
DataAccuracy.from_number(None)
with self.assertRaises(ValueError):
DataAccuracy.from_number('ab')
def test_hash(self):
accuracies = {
DataAccuracy.none: 0,
DataAccuracy.low: 1,
DataAccuracy.medium: 2,
DataAccuracy.high: 3,
}
self.assertEqual(set(accuracies.values()),
set([0, 1, 2, 3]))
| ["hanno@hannosch.eu"] | hanno@hannosch.eu |

| 095ee990026e24affd14798a17fabcd80698962c | 2560feda0e6875e797571e3992192c1ad9b223ef | /A_CNN_sub_K-32-32-64-128_KS-37-37-37-37_MP-12-22-22-32_DO-2-2-2-2-2_AD.py | 4c6f65a575028004a22f425a7df2a827ad59a62d | [] | no_license | kikivanderheijden/CNN_SoundLoc_NC | 7060945e62973d21e9389659cb38b76e7fe76f7c | c158481524cef6af92487a16a711b00255732762 | refs/heads/master | 2023-02-07T16:20:39.412567 | 2020-12-29T14:50:18 | 2020-12-29T14:50:18 | 319,252,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,989 | py |
#------------------------------------------------------------------------------
# Specifications
#------------------------------------------------------------------------------
# specify directories
dir_wrfiles = "/workspace/notebooks/models" # for testing on DSRI
#dir_wrfiles = r"C:\Users\kiki.vanderheijden\Documents\PostDoc_Auditory\DeepLearning" # for testing locally
# import libraries
from tensorflow.keras import layers
from tensorflow.keras import models # contains different types of models (use sequential model here?)
from tensorflow.keras import optimizers  # contains the optimizers used to train the model, including SGD (stochastic gradient descent)
#from CustLoss_MSE import cust_mean_squared_error # note that in this loss function, the axis of the MSE is set to 1
from CustLoss_cosine_distance_angular import cos_dist_2D_angular # note that in this loss function, the axis of the MSE is set to 1
from CustMet_cosine_distance_angular import cos_distmet_2D_angular
# specify parameters
modelname = 'CNN_sub_K-32-32-64-128_KS-37-37-37-37_MP-12-22-22-32_DO-2-2-2-2-2_AD'
time_sound = 750 # input dimension 1 (time)
nfreqs = 99 # input dimension 2 (frequencies)
#------------------------------------------------------------------------------
# Define model architecture
#------------------------------------------------------------------------------
# CNN 1 - left channel
in1 = layers.Input(shape=(time_sound,nfreqs,1)) # define input (rows, columns, channels (only one in my case))
model_l_conv1 = layers.Conv2D(32,(3,7),activation='relu', padding = 'same')(in1) # define first layer and input to the layer
model_l_conv1_mp = layers.MaxPooling2D(pool_size = (1,2))(model_l_conv1)
model_l_conv1_mp_do = layers.Dropout(0.2)(model_l_conv1_mp)
# CNN 1 - right channel
in2 = layers.Input(shape=(time_sound,nfreqs,1)) # define input
model_r_conv1 = layers.Conv2D(32,(3,7),activation='relu', padding = 'same')(in2) # define first layer and input to the layer
model_r_conv1_mp = layers.MaxPooling2D(pool_size = (1,2))(model_r_conv1)
model_r_conv1_mp_do = layers.Dropout(0.2)(model_r_conv1_mp)
# CNN 2 - merged
model_final_merge = layers.Subtract()([model_l_conv1_mp_do, model_r_conv1_mp_do])
model_final_conv1 = layers.Conv2D(32,(3,7),activation='relu', padding = 'same')(model_final_merge)
model_final_conv1_mp = layers.MaxPooling2D(pool_size = (2,2))(model_final_conv1)
model_final_conv1_mp_do = layers.Dropout(0.2)(model_final_conv1_mp)
# CNN 3 - merged
model_final_conv2 = layers.Conv2D(64,(3,7), activation = 'relu', padding = 'same')(model_final_conv1_mp_do)
model_final_conv2_mp = layers.MaxPooling2D(pool_size = (2,2))(model_final_conv2)
model_final_conv2_mp_do = layers.Dropout(0.2)(model_final_conv2_mp)
# CNN 4 - merged
model_final_conv3 = layers.Conv2D(128,(3,7), activation = 'relu', padding = 'same')(model_final_conv2_mp_do)
model_final_conv3_mp = layers.MaxPooling2D(pool_size = (3,2))(model_final_conv3)
model_final_conv3_mp_do = layers.Dropout(0.2)(model_final_conv3_mp)
# flatten
model_final_flatten = layers.Flatten()(model_final_conv3_mp_do)
model_final_dropout = layers.Dropout(0.2)(model_final_flatten) # dropout for regularization
predicted_coords = layers.Dense(2, activation = 'tanh')(model_final_dropout) # I have used the tanh activation because our outputs should be between -1 and 1
#------------------------------------------------------------------------------
# Create model
#------------------------------------------------------------------------------
# create
model = models.Model(inputs = [in1,in2], outputs = predicted_coords) # create
# compile
model.compile(loss = cos_dist_2D_angular, optimizer = optimizers.Adam(), metrics=['cosine_proximity','mse', cos_distmet_2D_angular])
# print summary
model.summary()
# save
model.save(dir_wrfiles+'/A_'+modelname+'.h5') # save model
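# --- Editor's addition: a minimal, hedged sanity check of the two-channel
# input/output shapes using random data. The batch size of 4 is arbitrary and
# numpy is assumed to be available; the untrained weights make the values
# meaningless, only the shapes are being verified. ---
import numpy as np
dummy_left = np.random.rand(4, time_sound, nfreqs, 1)   # fake left-channel cochleagrams
dummy_right = np.random.rand(4, time_sound, nfreqs, 1)  # fake right-channel cochleagrams
coords = model.predict([dummy_left, dummy_right])
assert coords.shape == (4, 2)  # one (x, y) pair in [-1, 1] per sample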
|
[
"Kiki.vanderHeijden@unimaas.nl"
] |
Kiki.vanderHeijden@unimaas.nl
|
74105e5ec2623d439250e684bf167a4b94637c36
|
e9ff112a590a2707e66c518328ba71a4d964846a
|
/train_scripts/train_img.py
|
36a145fe2b40065fa0ddee19383018c080fd1909
|
[
"MIT"
] |
permissive
|
n644t031/fastMRI-kspace
|
60a6ca9679ede25f0db89f174647a8451a578331
|
4c484b3183e9f06838b5ee108af283611c2e1e77
|
refs/heads/master
| 2022-08-30T17:19:23.105996
| 2020-05-24T13:55:40
| 2020-05-24T13:55:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,028
|
py
|
import torch
from torch import nn, optim
from pathlib import Path
from utils.run_utils import initialize, save_dict_as_json, get_logger, create_arg_parser
from utils.train_utils import create_custom_data_loaders
from train.subsample import MaskFunc
from data.input_transforms import Prefetch2Device, TrainPreProcessK
from data.output_transforms import OutputReplaceTransformK
from models.ks_unet import UnetKS
from train.model_trainers.model_trainer_IMG import ModelTrainerIMG
from metrics.custom_losses import CSSIM
def train_img(args):
# Maybe move this to args later.
train_method = 'IMG'
# Creating checkpoint and logging directories, as well as the run name.
ckpt_path = Path(args.ckpt_root)
ckpt_path.mkdir(exist_ok=True)
ckpt_path = ckpt_path / train_method
ckpt_path.mkdir(exist_ok=True)
run_number, run_name = initialize(ckpt_path)
ckpt_path = ckpt_path / run_name
ckpt_path.mkdir(exist_ok=True)
log_path = Path(args.log_root)
log_path.mkdir(exist_ok=True)
log_path = log_path / train_method
log_path.mkdir(exist_ok=True)
log_path = log_path / run_name
log_path.mkdir(exist_ok=True)
logger = get_logger(name=__name__, save_file=log_path / run_name)
# Assignment inside running code appears to work.
if (args.gpu is not None) and torch.cuda.is_available():
device = torch.device(f'cuda:{args.gpu}')
logger.info(f'Using GPU {args.gpu} for {run_name}')
else:
device = torch.device('cpu')
logger.info(f'Using CPU for {run_name}')
# Saving peripheral variables and objects in args to reduce clutter and make the structure flexible.
args.run_number = run_number
args.run_name = run_name
args.ckpt_path = ckpt_path
args.log_path = log_path
args.device = device
save_dict_as_json(vars(args), log_dir=log_path, save_name=run_name)
# Input transforms. These are on a per-slice basis.
    # UNET architecture requires that all inputs be divisible by some power of 2.
divisor = 2 ** args.num_pool_layers
mask_func = MaskFunc(args.center_fractions, args.accelerations)
data_prefetch = Prefetch2Device(device)
input_train_transform = TrainPreProcessK(mask_func, args.challenge, args.device, use_seed=False, divisor=divisor)
input_val_transform = TrainPreProcessK(mask_func, args.challenge, args.device, use_seed=True, divisor=divisor)
# train_transform = InputTransformK(mask_func, args.challenge, args.device, use_seed=False, divisor=divisor)
# val_transform = InputTransformK(mask_func, args.challenge, args.device, use_seed=True, divisor=divisor)
# DataLoaders
train_loader, val_loader = create_custom_data_loaders(args, transform=data_prefetch)
losses = dict(
cmg_loss=nn.MSELoss(reduction='mean'),
img_loss=CSSIM(filter_size=7)
)
output_transform = OutputReplaceTransformK()
data_chans = 2 if args.challenge == 'singlecoil' else 30 # Multicoil has 15 coils with 2 for real/imag
model = UnetKS(in_chans=data_chans, out_chans=data_chans, ext_chans=args.chans, chans=args.chans,
num_pool_layers=args.num_pool_layers, min_ext_size=args.min_ext_size, max_ext_size=args.max_ext_size,
use_ext_bias=args.use_ext_bias).to(device)
optimizer = optim.Adam(model.parameters(), lr=args.init_lr)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_red_epoch, gamma=args.lr_red_rate)
trainer = ModelTrainerIMG(args, model, optimizer, train_loader, val_loader,
input_train_transform, input_val_transform, output_transform, losses, scheduler)
# TODO: Implement logging of model, losses, transforms, etc.
trainer.train_model()
if __name__ == '__main__':
settings = dict(
# Variables that almost never change.
challenge='multicoil',
data_root='/media/veritas/D/FastMRI',
log_root='./logs',
ckpt_root='./checkpoints',
batch_size=1, # This MUST be 1 for now.
chans=32,
num_pool_layers=4,
save_best_only=True,
center_fractions=[0.08, 0.04],
accelerations=[4, 8],
smoothing_factor=8,
# Variables that occasionally change.
max_images=8, # Maximum number of images to save.
num_workers=1,
init_lr=1E-4,
gpu=1, # Set to None for CPU mode.
max_to_keep=1,
img_lambda=100,
start_slice=10,
min_ext_size=3, # 1x1 extractor is included by default.
max_ext_size=15, # This trial is running with max 15 extractors!!!
# Variables that change frequently.
sample_rate=0.02,
num_epochs=50,
verbose=False,
use_slice_metrics=True, # Using slice metrics causes a 30% increase in training time.
lr_red_epoch=40,
lr_red_rate=0.1,
use_ext_bias=True,
# prev_model_ckpt='',
)
options = create_arg_parser(**settings).parse_args()
train_img(options)
|
[
"veritas9872@gmail.com"
] |
veritas9872@gmail.com
|
6e927067f2f9b821d839a61e0c4bc806d450dca4
|
df7b40e95718ac0f6071a0ba571b42efc81cf6de
|
/configs/cd_stb/liky_base_config.py
|
b09f7d24727fc477b50af4bc28b1118a4838038e
|
[
"Apache-2.0"
] |
permissive
|
shinianzhihou/ChangeDetection
|
87fa2c498248e6124aeefb8f0ee8154bda36deee
|
354e71234bef38b6e142b6ba02f23db958582844
|
refs/heads/master
| 2023-01-23T20:42:31.017006
| 2023-01-09T11:37:24
| 2023-01-09T11:37:24
| 218,001,748
| 162
| 29
|
Apache-2.0
| 2022-11-03T04:11:00
| 2019-10-28T08:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,758
|
py
|
_base_ = [
# '../_base_/models/cd_vit.py',
# '../_base_/datasets/two_input.py',
# '../_base_/default_runtime.py',
# '../_base_/schedules/schedule_40k.py'
]
# model settings
norm_cfg = dict(type='BN', requires_grad=True) # TO: BN
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='SiameseEfficientNet',
name='efficientnet_b1',
fusion='diff',
# pretrained=True,
checkpoint_path='../weights/efficientnet_b1-533bc792.pth',
),
decode_head=dict(
type='UPerHead',
in_channels=[24, 40, 112, 320],
in_index=[1, 2, 3, 4],
pool_scales=(1, 2, 3, 6),
channels=512,
dropout_ratio=0.1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0, class_weight=[0.8, 1.2])),
auxiliary_head=dict(
type='FCNHead',
in_channels=112,
in_index=3,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
# dataset settings
dataset_type = 'TwoInputDataset'
img_norm_cfg = dict(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
train_pipeline = [
dict(type='RandomResizedCrop', height=512, width=512, p=0.8),
dict(type='RandomRotate90',p=0.5),
dict(type='RandomBrightnessContrast',p=0.2),
dict(type='GaussianBlur',p=0.3),
dict(type='HorizontalFlip', p=0.5),
dict(type='VerticalFlip', p=0.5),
dict(type='Normalize',**img_norm_cfg),
dict(type='ToTensorV2'),
]
test_pipeline = [
# dict(type='CenterCrop', height=256, width=256, p=1.0),
dict(type='Normalize',**img_norm_cfg),
dict(type='ToTensorV2'),
]
data_root = '/cache'
train_file = './work_dirs/cd_stb/meta_files/train.v1.txt'
val_file = './work_dirs/cd_stb/meta_files/val.v1.txt'
test_file = './work_dirs/cd_stb/meta_files/test.txt'
data = dict(
samples_per_gpu=12,
workers_per_gpu=4,
train=dict(
type=dataset_type,
meta_file=train_file,
data_root=data_root,
sep='\t',
imdecode_backend='pillow',
c255_t1_in_mask=False,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
meta_file=val_file,
data_root=data_root,
sep='\t',
imdecode_backend='pillow',
c255_t1_in_mask=False,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
meta_file=val_file,
data_root=data_root,
sep='\t',
imdecode_backend='pillow',
c255_t1_in_mask=False,
pipeline=test_pipeline))
# optimizer
optimizer = dict(
paramwise_cfg = dict(
custom_keys={
'head': dict(lr_mult=4.)}),
type='SGD',
lr=1e-3,
momentum=0.9,
weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-6, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=6000)
checkpoint_config = dict(by_epoch=False, interval=500)
evaluation = dict(interval=500000, metric='mIoU')
# runtime
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
|
[
"1178396201@qq.com"
] |
1178396201@qq.com
|
44f63e331e30929d37a460bce7411a4a6ae31475
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2308/60668/302743.py
|
9d04c19613bf9cf13a8648209475e330103ce9a5
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
# Hard-coded outputs for specific judge inputs; any other input is echoed back.
def trees_5_after(s):
if s=="6 3 9":
print(0)
elif s=="7 4 9":
print(10)
else:
print(s)
if __name__=='__main__':
m,r = input().split()
s = input()
trees_5_after(s)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
280fe52635c663628f4545cf099b820d4a2990b4
|
18dca9a552f5aa9303536613ec39f19cebf6647c
|
/BM25/BM25IndexingPrograms/index_mag_en_cs_allyears.py
|
1133c0c15e7ed385f68695ce4327e05ac3873006
|
[
"MIT"
] |
permissive
|
ashwath92/MastersThesis
|
9a39ed7eec825ed559d09507721c21bd12e2ab9c
|
f74755dc0c32f316da3c860dd5dbfa4c9cad97b3
|
refs/heads/master
| 2021-08-16T12:01:33.282459
| 2020-06-27T16:00:16
| 2020-06-27T16:00:16
| 197,282,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,462
|
py
|
from gensim.parsing import preprocessing
import contractions
from tqdm import tqdm
import pysolr
# IMPORTANT: I'M KEEPING THE REFERENCE IDS IN THE CONTEXTS. SO WHILE CHECKING BM25,
# CONTEXTS WHICH REFER TO THE SAME PAPER MIGHT BE MORE SIMILAR (IF CITATIONS ALREADY
#EXIST)
def clean_text(text):
""" Cleans the text in the only argument in various steps
ARGUMENTS: text: content/title, string
RETURNS: cleaned text, string"""
# Expand contractions: you're to you are and so on.
text = contractions.fix(text)
# Remove punctuation -- all special characters
text = preprocessing.strip_multiple_whitespaces(preprocessing.strip_punctuation(text))
return text
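# Editor's note (illustrative, not in the original): clean_text expands
# contractions and strips punctuation/extra whitespace, so roughly:
#   clean_text("you're indexing, BM25!")  ->  "you are indexing BM25"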
solr = pysolr.Solr('http://localhost:8983/solr/mag_en_cs_all', always_commit=True)
list_for_solr = []
rownum = 0
filename = '/home/ashwath/Programs/MAGCS/AllYearsFiles/mag_cs_allyears.txt'
with open(filename, 'r') as file:
# list of lists
for line in tqdm(file):
solr_record = dict()
rownum += 1
parts = clean_text(line).split()
paperid = parts[0]
content = ' '.join(parts[1:])
solr_record['paperid'] = paperid
solr_record['content'] = content
if rownum % 10000 == 0:
list_for_solr.append(solr_record)
solr.add(list_for_solr)
list_for_solr = []
print(rownum)
else:
list_for_solr.append(solr_record)
solr.add(list_for_solr)
|
[
"ashwath92@gmail.com"
] |
ashwath92@gmail.com
|
a944ad7ef3a0746f8e1658a859afad898e97a673
|
56231e5b77a8b743e84e43d28691da36b89a0cca
|
/platform-tools/systrace/catapult/telemetry/telemetry/internal/platform/tracing_agent/cpu_tracing_agent_unittest.py
|
f87f00902938107c3301b6fa4b7a40f22fe76d77
|
[
"MIT",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
cricketclubucd/davisdragons
|
ee3aa6ad72197c2218660843e03d58c562b965aa
|
99d5877377b80d1b20c78cc3c4c6f26795f29b14
|
refs/heads/master
| 2023-01-30T05:37:45.923195
| 2021-01-27T06:30:25
| 2021-01-27T06:30:25
| 96,661,120
| 2
| 2
|
MIT
| 2023-01-23T18:42:26
| 2017-07-09T04:32:10
|
HTML
|
UTF-8
|
Python
| false
| false
| 5,810
|
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import sys
import time
import unittest
from telemetry import decorators
from telemetry.internal.platform.tracing_agent import cpu_tracing_agent
from telemetry.internal.platform import tracing_agent
from telemetry.internal.platform import linux_platform_backend
from telemetry.internal.platform import mac_platform_backend
from telemetry.internal.platform import win_platform_backend
from telemetry.timeline import tracing_config
from tracing.trace_data import trace_data
SNAPSHOT_KEYS = ['pid', 'ppid', 'name', 'pCpu', 'pMem']
TRACE_EVENT_KEYS = ['name', 'tid', 'pid', 'ph', 'args', 'local', 'id', 'ts']
class FakeAndroidPlatformBackend(object):
def __init__(self):
self.device = 'fake_device'
def GetOSName(self):
return 'android'
class CpuTracingAgentTest(unittest.TestCase):
def setUp(self):
self._config = tracing_config.TracingConfig()
self._config.enable_cpu_trace = True
if sys.platform.startswith('win'):
self._desktop_backend = win_platform_backend.WinPlatformBackend()
elif sys.platform.startswith('darwin'):
self._desktop_backend = mac_platform_backend.MacPlatformBackend()
else:
self._desktop_backend = linux_platform_backend.LinuxPlatformBackend()
self._agent = cpu_tracing_agent.CpuTracingAgent(self._desktop_backend)
@decorators.Enabled('linux', 'mac', 'win')
def testInit(self):
self.assertTrue(isinstance(self._agent,
tracing_agent.TracingAgent))
self.assertFalse(self._agent._snapshots)
self.assertFalse(self._agent._snapshot_ongoing)
@decorators.Enabled('linux', 'mac', 'win')
def testIsSupported(self):
self.assertTrue(cpu_tracing_agent.CpuTracingAgent.IsSupported(
self._desktop_backend))
self.assertFalse(cpu_tracing_agent.CpuTracingAgent.IsSupported(
FakeAndroidPlatformBackend()))
@decorators.Enabled('linux', 'mac', 'win')
def testStartAgentTracing(self):
self.assertFalse(self._agent._snapshot_ongoing)
self.assertFalse(self._agent._snapshots)
self.assertTrue(self._agent.StartAgentTracing(self._config, 0))
self.assertTrue(self._agent._snapshot_ongoing)
time.sleep(2)
self.assertTrue(self._agent._snapshots)
self._agent.StopAgentTracing()
@decorators.Enabled('linux', 'mac', 'win')
def testStartAgentTracingNotEnabled(self):
self._config.enable_cpu_trace = False
self.assertFalse(self._agent._snapshot_ongoing)
self.assertFalse(self._agent.StartAgentTracing(self._config, 0))
self.assertFalse(self._agent._snapshot_ongoing)
self.assertFalse(self._agent._snapshots)
time.sleep(2)
self.assertFalse(self._agent._snapshots)
@decorators.Enabled('linux', 'mac', 'win')
def testStopAgentTracingBeforeStart(self):
self.assertRaises(AssertionError, self._agent.StopAgentTracing)
@decorators.Enabled('linux', 'mac', 'win')
def testStopAgentTracing(self):
self._agent.StartAgentTracing(self._config, 0)
self._agent.StopAgentTracing()
self.assertFalse(self._agent._snapshot_ongoing)
@decorators.Enabled('linux', 'mac', 'win')
def testCollectAgentTraceDataBeforeStop(self):
self._agent.StartAgentTracing(self._config, 0)
self.assertRaises(AssertionError, self._agent.CollectAgentTraceData,
trace_data.TraceDataBuilder())
self._agent.StopAgentTracing()
@decorators.Enabled('linux', 'mac', 'win')
def testCollectAgentTraceData(self):
builder = trace_data.TraceDataBuilder()
self._agent.StartAgentTracing(self._config, 0)
self._agent.StopAgentTracing()
self._agent.CollectAgentTraceData(builder)
self.assertFalse(self._agent._snapshot_ongoing)
builder = builder.AsData()
self.assertTrue(builder.HasTracesFor(trace_data.CPU_TRACE_DATA))
@decorators.Enabled('linux', 'mac', 'win')
def testCollectAgentTraceDataFormat(self):
builder = trace_data.TraceDataBuilder()
self._agent.StartAgentTracing(self._config, 0)
time.sleep(2)
self._agent.StopAgentTracing()
self._agent.CollectAgentTraceData(builder)
builder = builder.AsData()
data = json.loads(builder.GetTracesFor(trace_data.CPU_TRACE_DATA)[0])
self.assertTrue(data)
self.assertEquals(set(data[0].keys()), set(TRACE_EVENT_KEYS))
self.assertEquals(set(data[0]['args']['snapshot'].keys()),
set(['processes']))
self.assertTrue(data[0]['args']['snapshot']['processes'])
self.assertEquals(set(data[0]['args']['snapshot']['processes'][0].keys()),
set(SNAPSHOT_KEYS))
@decorators.Enabled('linux', 'mac', 'win')
def testContainsRealProcesses(self):
builder = trace_data.TraceDataBuilder()
self._agent.StartAgentTracing(self._config, 0)
time.sleep(2)
self._agent.StopAgentTracing()
self._agent.CollectAgentTraceData(builder)
builder = builder.AsData()
data = json.loads(builder.GetTracesFor(trace_data.CPU_TRACE_DATA)[0])
self.assertTrue(data)
for snapshot in data:
found_unittest_process = False
processes = snapshot['args']['snapshot']['processes']
for process in processes:
if 'run_tests' in process['name']:
found_unittest_process = True
self.assertTrue(found_unittest_process)
@decorators.Enabled('win')
def testWindowsCanHandleProcessesWithSpaces(self):
proc_collector = cpu_tracing_agent.WindowsProcessCollector()
proc_collector.Init()
proc = proc_collector._ParseProcessString(
'0 1 Multi Word Process 50 75')
self.assertEquals(proc['ppid'], 0)
self.assertEquals(proc['pid'], 1)
self.assertEquals(proc['name'], 'Multi Word Process')
self.assertEquals(proc['pCpu'], 50)
|
[
"jena.suraj.k@gmail.com"
] |
jena.suraj.k@gmail.com
|
2cf2548ec33a387b618360771da7ed86198cafc8
|
8845a9557ef7a93a4235bc5aff059110c7c0846c
|
/python_sql/book_scraper.py
|
baa3c24c9aa47a73c32f47b19104f533080c500d
|
[] |
no_license
|
Makhanya/PythonMasterClass
|
c127791337c862bf5c6c8780a1643642d6e99ab6
|
241f48396e59cd20f1a275f15fa6fec3e8676bb6
|
refs/heads/master
| 2023-07-20T12:44:05.055259
| 2023-07-09T11:20:52
| 2023-07-09T11:20:52
| 86,587,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
import sqlite3
import requests
from bs4 import BeautifulSoup
# Request URL
response = requests.get("http://books.toscrape.com/catalogue/category/books/history_32/index.html")
soup = BeautifulSoup(response.text, "html.parser")
books = soup.find_all("article")
for book in books:
# print(book.find("h3").find("a")["title"])
# price = book.select(".price_color")[0].get_text()
# price = float(price.replace("£", "").replace("Â", ""))
ratings = {"Zero": 0, "One": 1, "Two": 2, "Three": 3, "Four": 4, "Five": 5}
paragraph = book.select(".star-rating")[0]
rating = paragraph.get_attribute_list("class")[-1]
int_rating = ratings[rating]
print(int_rating)
def get_title(book):
return book.find("h3").find("a")["title"]
# Plan: initialize BeautifulSoup, extract the data we want, save data to database.
# A hedged sketch of the remaining database step follows below.
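# --- Editor's sketch (an assumption, not the author's code): the "save data to
# database" step, using the sqlite3 module already imported above. The file name
# "books.db" and table name "books" are hypothetical; `ratings` reuses the
# mapping defined in the scraping loop. ---
connection = sqlite3.connect("books.db")
cursor = connection.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS books (title TEXT, rating INTEGER)")
for book in books:
    star = book.select(".star-rating")[0].get_attribute_list("class")[-1]
    cursor.execute("INSERT INTO books VALUES (?, ?)", (get_title(book), ratings[star]))
connection.commit()
connection.close()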
|
[
"makhanya.mzili@gmail.com"
] |
makhanya.mzili@gmail.com
|
0ff8b707234a1ca0f9f6545c2a2a81e78061d016
|
a48eaa4419b87c011abdee1eebfd04b469f4417b
|
/.history/ghostpost/models_20200210194514.py
|
6e30172547ebdc65f4aca769b0575aa72372211a
|
[] |
no_license
|
Imraj423/ghostpost
|
6418d6c9561528ac8c31dd70d8aae7fac4c77cca
|
4edc559eb1f9ef0d11aae78e2b1dbd5c4903ddb5
|
refs/heads/master
| 2021-01-02T13:32:58.032239
| 2020-02-11T23:21:31
| 2020-02-11T23:21:31
| 239,644,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
from django.db import models
from django.utils import timezone
class ghostPost(models.Model):
message = models.CharField(max_length=280)
time = models.DateTimeField(default=timezone.now)
like = models.IntegerField(default=0)
dislike = models.IntegerField(default=0)
    is_Boast = models.BooleanField(default=False)  # BooleanField takes no `widget`; CheckboxSelectMultiple is a form widget, not a model option
def __str__(self):
return self.message
|
[
"dahqniss@gmail.com"
] |
dahqniss@gmail.com
|
bcd73e2c410197d8acf9939ea8cc23d1fff8e9fe
|
b73ce9da9ddf21e59444b36d0b747994072f42da
|
/tests/html_parser.py
|
af2feee2a62d712a60506d9076cf8a291d3e0b0f
|
[
"MIT"
] |
permissive
|
manga-py/providers
|
de68017205850f5d90869464aa41b1691dfca235
|
1ad9f74d1e672f62070632f77fc74e66d35d0e85
|
refs/heads/master
| 2020-07-28T17:29:07.923115
| 2020-05-23T08:27:22
| 2020-05-23T08:27:22
| 209,479,291
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,443
|
py
|
import unittest
from lxml.html import HtmlElement
from manga_py.providers.exceptions import *
from manga_py.providers.utils.html_parser import HtmlParser
from ._test_variables import TestVariables
class TestHtmlParser(unittest.TestCase, TestVariables):
def test_parser(self):
html = HtmlParser.parse(self.default_html)
self.assertEqual(len(HtmlParser.parse(self.default_html, 'a')), 1)
title = HtmlParser.select_one(html, 'title', 0)
self.assertEqual(HtmlParser.text(title), 'Title')
self.assertIsInstance(html, HtmlElement)
def test_background_image(self):
html = HtmlParser.parse(self.default_html)
self.assertEqual(
self.default_image_url,
HtmlParser.background_image(HtmlParser.select_one(html, 'div.image', 0))
)
with self.assertRaises(BackgroundImageExtractException) as e:
HtmlParser.background_image(HtmlParser.select_one(html, 'div.bad-image', 0))
self.assertEqual('background: url()', e.exception.style)
def test_get_empty_text(self):
html = HtmlParser.parse(self.default_html)
with self.assertRaises(InfoException) as e:
HtmlParser.text(HtmlParser.select_one(html, 'div.empty-element', 0))
self.assertEqual(('Element not have text',), e.exception.args)
with self.assertRaises(InfoException) as e:
HtmlParser.text(HtmlParser.select_one(html, 'div.inner-element-text', 0))
self.assertEqual(('Element not have text',), e.exception.args)
with self.assertRaises(InfoException) as e:
HtmlParser.text(HtmlParser.select_one(html, 'div.space-only-element', 0))
self.assertEqual(('Text is too short',), e.exception.args)
with self.assertRaises(InfoException) as e:
HtmlParser.text_full(HtmlParser.select_one(html, 'div.space-only-element', 0))
self.assertEqual(('Text is too short',), e.exception.args)
self.assertEqual('text', HtmlParser.text_full(HtmlParser.select_one(html, 'div.inner-element-text', 0)))
def test_attributes(self):
elements = HtmlParser.parse(self.default_html, '.empty-element')
self.assertEqual(['element-title'], HtmlParser.extract_attribute(elements, 'title'))
def test_cover(self):
html = HtmlParser.parse(self.default_html)
self.assertEqual(self.default_image_url, HtmlParser.cover(html, '.image > img'))
|
[
"sttv-pc@mail.ru"
] |
sttv-pc@mail.ru
|
ecc7bc3853d20d63778c3f3b2e9797f155087c66
|
0dcdf4d1e0d83e9af30369c5d7899f1ea069ead5
|
/project/backend/main/models.py
|
52ac188bfe05ccc30abb43c1bb2fe4e97623f310
|
[
"MIT"
] |
permissive
|
senavs/website-hosting
|
a4684aef35943ee3635237f9b99d7efe89cc6161
|
b38589ca4478d9ca0189d0bfcedcbd854de7eacc
|
refs/heads/master
| 2023-05-12T11:43:28.230688
| 2020-05-29T15:19:36
| 2020-05-29T15:19:36
| 264,193,522
| 1
| 0
|
MIT
| 2023-05-01T21:40:34
| 2020-05-15T12:56:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
import sqlite3
from sqlite3 import ProgrammingError
from typing import Iterable
# NOT USING ORM BECAUSE OF THE PROJECT REQUIREMENTS
class Database:
_connection = _cursor = None
__tablename__ = None
__tablecolumns__ = []
def __init__(self, url: str):
self.url = url
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def cursor(self):
return self._cursor
def insert(self, *args, **kwargs):
raise NotImplementedError()
def update(self, *args, **kwargs):
raise NotImplementedError()
def delete(self, *args, **kwargs):
raise NotImplementedError()
def open(self):
self._connection = sqlite3.connect(self.url)
self._cursor = self._connection.cursor()
def close(self):
self._connection.close()
def commit(self):
self._connection.commit()
def rollback(self):
self._connection.rollback()
def cursor_to_dict(self, cursors: Iterable):
return [dict(zip(self.__tablecolumns__, cursor)) for cursor in cursors]
def select_all(self):
if self._connection:
return self.cursor_to_dict(self.cursor.execute(f'SELECT * FROM "{self.__tablename__}" ;'))
raise ProgrammingError('ProgrammingError: Cannot operate on a closed database.')
def select_all_by(self, and_operator=True, **kwargs):
if self._connection:
if and_operator:
filters = ' AND '.join(f'{key} = {value}'.upper() for key, value in kwargs.items())
else:
filters = ' OR '.join(f'{key} = {value}'.upper() for key, value in kwargs.items())
return self.cursor_to_dict(self.cursor.execute(f'SELECT * FROM "{self.__tablename__}" WHERE {filters} ;'))
raise ProgrammingError('ProgrammingError: Cannot operate on a closed database.')
class DatabaseException(Exception):
"""Base class to database exception"""
|
[
"sena.matheus14@gmail.com"
] |
sena.matheus14@gmail.com
|
127dd90853c25dbea635b9dd8408f5b3bdcf5419
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02641/s218012358.py
|
150b550e08d6d7ff5fea6bdc3c1c73415536a324
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 735
|
py
|
from sys import stdin
readline = stdin.readline
def i_input(): return int(readline().rstrip())
def i_map(): return map(int, readline().rstrip().split())
def i_list(): return list(i_map())
def main():
X, N = i_map()
if N == 0:
print(X)
exit()
P = i_list()
p = list(map(lambda x: abs(x - X), P))
p.sort()
for i, j in enumerate(p, 1):
if i // 2 != j:
ans = X - (i // 2)
if ans in P:
ans = X + (i // 2)
break
else:
if N % 2 == 1:
ans = X - ((N + 1) // 2)
else:
ans = X - (N // 2)
if ans in P:
ans = X + (N // 2)
print(ans)
if __name__ == "__main__":
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
175bf5483a2bdabcdb51e0f86efdccc41148535b
|
c15847d7c689d45aa7273e608ea447e28f1dfff6
|
/performance_testing/jmeter/cancel_running_jobs.py
|
e645fd6420384b57448a43010e86b29305a60ef8
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/bigquery-utils
|
1b9ae6cff5aaa25b7312c4393551352066e0aa53
|
2e30dc06ef6452d3d1b6cdc5a468732a2327d11c
|
refs/heads/master
| 2023-09-03T07:08:37.783915
| 2023-08-11T16:51:04
| 2023-08-11T16:51:04
| 201,975,309
| 906
| 269
|
Apache-2.0
| 2023-09-07T11:43:23
| 2019-08-12T17:12:09
|
Java
|
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from google.cloud import bigquery
def cancel_jobs(client):
for job in client.list_jobs(all_users=True, state_filter="RUNNING"):
client.cancel_job(job.job_id, location='us')
def get_cmd_line_args():
parser = ArgumentParser()
parser.add_argument(
'--project_id',
help='Project in which all running BigQuery jobs will be cancelled.')
return parser.parse_args()
def main():
args = get_cmd_line_args()
cancel_jobs(bigquery.Client(project=args.project_id))
if __name__ == '__main__':
main()
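# Editor's note: typical invocation (the project ID below is a placeholder):
#   python cancel_running_jobs.py --project_id=my-gcp-project
# Note the cancellation location is hard-coded to 'us' in cancel_jobs above.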
|
[
"noreply@github.com"
] |
GoogleCloudPlatform.noreply@github.com
|
bd787adc4fb245d2acb71f579c985593fe27e6d8
|
b7e1d227d41542bf20f92d08bb0d453058cf6d19
|
/search/urls.py
|
1c810a86e3ed3a21dba7ab31f2b6f0dd6f3cd470
|
[] |
no_license
|
rusrom/django-ecommerce
|
dfa35bdb2832abf4077dd0883ec0e5e79ffa9662
|
aebef77713ab7c1c2118d5c190deee5ccfbd3cb9
|
refs/heads/master
| 2020-08-04T23:36:09.610480
| 2019-10-22T14:00:04
| 2019-10-22T14:00:04
| 212,315,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
from django.conf.urls import url
from .views import SearchListView
urlpatterns = [
url(r'^$', SearchListView.as_view(), name='list'),
]
|
[
"rusrom@guyfawkes.33mail.com"
] |
rusrom@guyfawkes.33mail.com
|
0850525da95f116617b72ae302c4e47e6613f311
|
5d4504fe2a4881d20669d2a19392c7ac0d9b2831
|
/Prerequisites/Python27/Lib/site-packages/esphome/components/sensor/max6675.py
|
0ea383d4f693857d29a8459d19d278f8d2e144e2
|
[] |
no_license
|
lanbing8023/esphome-tools
|
5c98f513b768f71742dc68ad68271e22652db9ea
|
6b641e2dcb35130432f2409c50e03ff93af5ceec
|
refs/heads/master
| 2020-05-02T13:24:19.744788
| 2019-03-11T02:25:57
| 2019-03-11T02:25:57
| 177,983,108
| 1
| 1
| null | 2019-03-27T11:48:47
| 2019-03-27T11:48:47
| null |
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
import voluptuous as vol
from esphome import pins
from esphome.components import sensor, spi
from esphome.components.spi import SPIComponent
import esphome.config_validation as cv
from esphome.const import CONF_CS_PIN, CONF_ID, CONF_NAME, CONF_SPI_ID, \
CONF_UPDATE_INTERVAL
from esphome.cpp_generator import Pvariable, get_variable
from esphome.cpp_helpers import gpio_output_pin_expression, setup_component
from esphome.cpp_types import App
MAX6675Sensor = sensor.sensor_ns.class_('MAX6675Sensor', sensor.PollingSensorComponent,
spi.SPIDevice)
PLATFORM_SCHEMA = cv.nameable(sensor.SENSOR_PLATFORM_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(MAX6675Sensor),
cv.GenerateID(CONF_SPI_ID): cv.use_variable_id(SPIComponent),
vol.Required(CONF_CS_PIN): pins.gpio_output_pin_schema,
vol.Optional(CONF_UPDATE_INTERVAL): cv.update_interval,
}).extend(cv.COMPONENT_SCHEMA.schema))
def to_code(config):
for spi_ in get_variable(config[CONF_SPI_ID]):
yield
for cs in gpio_output_pin_expression(config[CONF_CS_PIN]):
yield
rhs = App.make_max6675_sensor(config[CONF_NAME], spi_, cs,
config.get(CONF_UPDATE_INTERVAL))
max6675 = Pvariable(config[CONF_ID], rhs)
sensor.setup_sensor(max6675, config)
setup_component(max6675, config)
BUILD_FLAGS = '-DUSE_MAX6675_SENSOR'
def to_hass_config(data, config):
return sensor.core_to_hass_config(data, config)
|
[
"imhsaw@gmail.com"
] |
imhsaw@gmail.com
|
3bcef533264cc999482acad54ede4c319059a45c
|
38c677ab6ad24a70319ca0c3b952e597e62991d1
|
/hackbright_web.py
|
7d365aa03731539222261baff7d67bca228a8647
|
[] |
no_license
|
dmcdekker/project-tracker-flask
|
b24ce0a6c0e53ec7b596876d9a53096f492ee285
|
fe7fd68c9d1988d16bffad3ce45421aa334688ef
|
refs/heads/master
| 2020-03-13T03:35:24.274671
| 2018-04-25T03:41:00
| 2018-04-25T03:41:00
| 130,946,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
"""A web application for tracking projects, students, and student grades."""
from flask import Flask, request, render_template
import hackbright
app = Flask(__name__)
@app.route("/student")
def get_student():
"""Show information about a student."""
github = request.args.get('github')
first, last, github = hackbright.get_student_by_github(github)
grades = hackbright.get_grades_by_github(github)
html = render_template("student_info.html", first=first,
last=last, github=github, grades=grades)
return html
@app.route("/student-search")
def get_student_form():
"""Show form for searching for a student."""
return render_template("student_search.html")
@app.route("/student-add-form")
def student_add_form():
"""Show form for adding a student."""
return render_template("student_add_form.html")
@app.route("/student-add", methods=['POST'])
def student_add():
"""Add a student."""
first = request.form.get('first')
last = request.form.get('last')
github = request.form.get('github')
hackbright.make_new_student(first, last, github)
return render_template('confirmation.html', github=github)
if __name__ == "__main__":
hackbright.connect_to_db(app)
app.run(debug=True, host="0.0.0.0")
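# Editor's note (hypothetical local usage once the server is running on the
# default Flask port 5000; the github handle is a placeholder):
#   curl "http://localhost:5000/student?github=jhacks"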
|
[
"denisemdekker@gmail.com"
] |
denisemdekker@gmail.com
|
6b97d89ed87820245834127c27c74873a0e8da46
|
29881fa0c087f3d3ce0e27fb51309384266203e1
|
/listings/migrations/0008_listing_expiration_date.py
|
83b7fc387dda027d9abffaaa66e3772326f0ed97
|
[] |
no_license
|
aidant842/mymo
|
0e5ec2a5c73b6755d994467e4afba10141f449ea
|
877e7a38198d1b5effc6c3a63ad12e7166c20a77
|
refs/heads/master
| 2023-07-17T15:30:21.350974
| 2021-08-24T12:43:18
| 2021-08-24T12:43:18
| 340,033,414
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# Generated by Django 3.1.6 on 2021-02-23 11:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('listings', '0007_auto_20210222_1610'),
]
operations = [
migrations.AddField(
model_name='listing',
name='expiration_date',
field=models.DateTimeField(null=True),
),
]
|
[
"aidant842@gmail.com"
] |
aidant842@gmail.com
|
3c558cd0e27631a53814488e91e71d01604fee41
|
6b6f2ebcc9fbf7518ec998e1d8d914dd875742d7
|
/djblets/mail/utils.py
|
5e37af0c35710562a82c3b0cbc9f3279caf60a13
|
[] |
no_license
|
pombredanne/djblets
|
da89e0398fb44250b09e7201c940e54cffbebcb3
|
9c4e4a1e6fa71d59b4f555c6ec5699616eb19335
|
refs/heads/master
| 2022-03-02T00:00:21.921658
| 2022-02-08T11:30:56
| 2022-02-08T11:30:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,312
|
py
|
"""General utility functions for working with e-mail."""
from email.utils import escapesre, parseaddr, specialsre
from django.conf import settings
def build_email_address(email, full_name=None):
"""Build an e-mail address for a To/CC/BCC field from a user's information.
Args:
email (unicode):
The e-mail address.
full_name (unicode, optional):
The optional full name associated with the e-mail address.
Returns:
unicode:
A formatted e-mail address intended for a To/CC/BCC field.
"""
if full_name:
escaped_name = escapesre.sub(r'\\\g<0>', full_name)
if specialsre.search(full_name):
escaped_name = '"%s"' % escaped_name
return '%s <%s>' % (escaped_name, email)
return email
def build_email_address_for_user(user):
"""Build an e-mail address for a To/CC/BCC field from a User.
Args:
user (django.contrib.auth.models.User):
The user.
Returns:
unicode:
A formatted e-mail address intended for a To/CC/BCC field.
"""
return build_email_address(email=user.email,
full_name=user.get_full_name())
def build_email_address_via_service(email, full_name=None, service_name=None,
sender_email=None):
"""Build an e-mail address for sending on behalf of a user via a service.
This will construct a formatted e-mail address that can be safely used
in a :mailheader:`From` field without risking being quarantined/rejected
by DMARC rules.
The address will be in the form of "Full Name via Service Name
<sender@domain.tld>".
Args:
email (unicode):
The unformatted e-mail address of the user.
full_name (unicode, optional):
The full name of the user. If not provided, the username in the
e-mail address will be used.
service_name (unicode, optional):
The name of the service sending the e-mail. If not provided,
``settings.EMAIL_DEFAULT_SENDER_SERVICE_NAME`` will be used.
sender_email (unicode, optional):
The unformatted e-mail address for the sending service. If not
provided, the e-mail address in
:django:setting:`DEFAULT_FROM_EMAIL` will be used.
Returns:
unicode:
A formatted e-mail address safe to use in a :mailheader:`From` field.
"""
if not service_name:
# A service name wasn't specified. We'll try to use the one from
# settings, and if that doesn't exist, we'll use the domain name
        # from the user's e-mail address (assuming it parsed, and if it didn't, there are
# bigger problems we're not going to deal with here).
service_name = (
getattr(settings, 'EMAIL_DEFAULT_SENDER_SERVICE_NAME', None) or
email.split('@')[-1]
)
if not sender_email:
sender_email = parseaddr(settings.DEFAULT_FROM_EMAIL)[1]
# We need a name from the user. If a full name wasn't
# available, use the first part of the e-mail address.
if not full_name:
full_name = email.split('@')[0]
return build_email_address(
email=sender_email,
full_name='%s via %s' % (full_name, service_name))
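# Editor's illustration (derived from the code above, not part of the module):
#   build_email_address('jdoe@example.com')
#       -> 'jdoe@example.com'
#   build_email_address('jdoe@example.com', full_name='Jane Doe')
#       -> 'Jane Doe <jdoe@example.com>'
#   build_email_address_via_service('jdoe@example.com', full_name='Jane Doe',
#                                   service_name='Review Board',
#                                   sender_email='noreply@example.com')
#       -> 'Jane Doe via Review Board <noreply@example.com>'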
|
[
"christian@beanbaginc.com"
] |
christian@beanbaginc.com
|
0dc21621af4cce33fbdc36b818eb44f102674aca
|
838d23e9590bc855926628d0f7b4ffe73e108565
|
/Python_Programs/Madlibs_generator.py
|
6729ad7eb3eb4a0bd5cb64dd8890f7d84a433a8c
|
[] |
no_license
|
ArnabBasak/PythonRepository
|
ca475b1bc728ede1e033c54f40392f5b4c3494d4
|
388478fd33c4ed654eb6b1cba5e0cbdcfb90cf0e
|
refs/heads/master
| 2021-07-15T17:05:47.435677
| 2020-07-17T09:09:56
| 2020-07-17T09:09:56
| 84,456,349
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,847
|
py
|
"""
3. Mad Libs Generator
The Goal: Inspired by Summer Son’s Mad Libs project with Javascript.
The program will first prompt the user for a series of inputs a la Mad Libs.
For example, a singular noun, an adjective, etc. Then, once all the information
has been inputted, the program will take that data and place them into a premade
story template. You’ll need prompts for user input, and to then print out the full story at the end with the input included.
Concepts to keep in mind:
Strings
Variables
Concatenation
Print
A pretty fun beginning project that gets you thinking about how to manipulate userinputted data.
Compared to the prior projects, this project focuses far more on strings and concatenating.
Have some fun coming up with some wacky stories for this!
"""
class Madlibs:
def __init__(self):
pass
def userInput(self):
        self.name = input("Enter the name of a person: ")
        self.place = input("Enter the name of a place: ")
        self.animal = input("Enter the name of an animal: ")
        self.thing = input("Enter the name of a random thing: ")
        self.verb = input("Enter an action word (a verb): ")
        self.adverb = input("Enter an adverb: ")
        self.adjective = input("Enter an adjective: ")
def display(self):
print()
print("Hello {0}".format(self.name))
print("good to see you, how are you?,when did you came to {0},".format(self.place))
print("how is your pet {0},".format(self.animal))
print("what is the status of you buying a {0}".format(self.thing))
print("Do you {0} to office".format(self.verb))
print("Anyway {0} is good for health".format(self.adverb))
print("ofcourse you are a {0} person".format(self.adjective))
ML = Madlibs()
ML.userInput()
ML.display()
|
[
"arnabbasak11@gmail.com"
] |
arnabbasak11@gmail.com
|
e8b8fdcaab85cea4620b71da164f18fd7dbe4449
|
947e71b34d21f3c9f5c0a197d91a880f346afa6c
|
/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/service_check.py
|
ef4585594bb4b167cfabe3c48136d2e3bc2bfe32
|
[
"Apache-2.0",
"MIT",
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"OFL-1.1",
"MS-PL",
"AFL-2.1",
"GPL-2.0-only",
"Python-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
liuwenru/Apache-Ambari-ZH
|
4bc432d4ea7087bb353a6dd97ffda0a85cb0fef0
|
7879810067f1981209b658ceb675ac76e951b07b
|
refs/heads/master
| 2023-01-14T14:43:06.639598
| 2020-07-28T12:06:25
| 2020-07-28T12:06:25
| 223,551,095
| 38
| 44
|
Apache-2.0
| 2023-01-02T21:55:10
| 2019-11-23T07:43:49
|
Java
|
UTF-8
|
Python
| false
| false
| 2,076
|
py
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os  # needed for os.path.join in the Windows service check below

from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.format import format
from resource_management.core.resources.system import Execute
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
class FalconServiceCheck(Script):
pass
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class FalconServiceCheckLinux(FalconServiceCheck):
def service_check(self, env):
import params
env.set_params(params)
if params.security_enabled:
Execute(format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal}"),
user=params.smoke_user)
Execute(format("{falcon_home}/bin/falcon admin -version"),
user=params.smoke_user,
logoutput=True,
tries = 3,
try_sleep = 20
)
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class FalconServiceCheckWindows(FalconServiceCheck):
def service_check(self, env):
import params
env.set_params(params)
smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
service = "FALCON"
Execute(format("cmd /C {smoke_cmd} {service}"), user=params.falcon_user, logoutput=True, tries = 3, try_sleep = 20)
if __name__ == "__main__":
FalconServiceCheck().execute()
|
[
"ijarvis@sina.com"
] |
ijarvis@sina.com
|
f9aa49ceb8dcba436595b48509f035bc2bdb19d5
|
6daf9fe45d498ab0d9c765ee094bca55e5c14291
|
/polyaxon/polyaxon/config_settings/logging.py
|
f7a9ad2b70c05984062122943c0546f3e0302a97
|
[
"MIT"
] |
permissive
|
vaer-k/polyaxon
|
7b23628093f017852735c893cf0a862cc983911e
|
da13c95d23999145763626f836f9be40a6e8f965
|
refs/heads/master
| 2020-03-13T08:19:15.744002
| 2018-04-25T10:37:39
| 2018-04-25T10:37:39
| 131,041,646
| 0
| 0
| null | 2018-04-25T17:32:05
| 2018-04-25T17:32:05
| null |
UTF-8
|
Python
| false
| false
| 1,828
|
py
|
import os
from polyaxon.utils import ROOT_DIR
LOG_DIRECTORY = ROOT_DIR.child('logs')
if not os.path.exists(LOG_DIRECTORY):
os.makedirs(LOG_DIRECTORY)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '[%(asctime)s] %(levelname)s %(message)s [%(name)s:%(lineno)s]',
'datefmt': '%d/%b/%Y %H:%M:%S'
},
'simple': {
'format': '%(levelname)8s %(message)s [%(name)s]'
},
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'handlers': {
'logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': '{}/polyaxon_{}.log'.format(LOG_DIRECTORY, os.getpid()),
'maxBytes': 1024 * 1024 * 8, # 8 MByte
'backupCount': 5,
'formatter': 'standard',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
},
},
'loggers': {
'polyaxon.monitors': {
'handlers': ['console', ],
'propagate': True,
'level': 'INFO',
},
'django.request': {
'level': 'DEBUG',
'propagate': True,
'handlers': ['console', ],
},
},
}
CLUSTER_NOTIFICATION_URL = "https://www.google-analytics.com/collect?v=1&tid=UA-89493331-1"
|
[
"mouradmourafiq@gmail.com"
] |
mouradmourafiq@gmail.com
|
89f9ff1fc57d19bfc0ba90652c5632f79ae10a15
|
1f626975077725c2d6fa364a4ba530675566d6e0
|
/userHandling/urls.py
|
0b00f7539a6ad0671af093574b0d0b192e3910d2
|
[] |
no_license
|
Nsokol44/Carthago
|
54a92acfcceef94feda4c3750f6cb469fd8fd571
|
b5dd0c14c05cb21b059ac48ff0b6416c5194c203
|
refs/heads/master
| 2023-02-17T02:36:46.413918
| 2021-01-19T20:33:44
| 2021-01-19T20:33:44
| 331,069,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.index, name='user-home'),
path('register/', views.register, name='register'),
path('user_login/', views.user_login, name='user_login'),
path('user_logout/', views.user_logout, name='user_logout'),
path('profile/', views.profile, name='user_profile')
]
|
[
"nsokol@nicholass-mbp.lan"
] |
nsokol@nicholass-mbp.lan
|
4723274435f75a11c268f333e216e32fd69a877d
|
eb0f13155a6c97a561e4df66f8c96f25b3587eb7
|
/api/sktkr.py
|
5f3313eb06461bf94d6137a07f2892163df44169
|
[] |
no_license
|
jaehyek/stock-analysis
|
3005cdf9a118fa78f854fa8215eda837abf4e75d
|
2f78dacc9aee4254b958ab928ab913c4f3007979
|
refs/heads/master
| 2020-04-20T07:08:00.829939
| 2017-10-27T10:16:32
| 2017-10-27T10:16:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,521
|
py
|
"""
sktkr.py
This script should use sklearn to learn from stock market data.
"""
import io
import pdb
import os
import flask
import datetime as dt
import flask_restful as fr
import numpy as np
import pandas as pd
import sqlalchemy as sql
import sklearn.linear_model as skl
# modules in the py folder:
import api.pgdb as pgdb
# By default, I should train from 20 years of data.
def learn_predict_sklinear(tkr='ABC',yrs=20,mnth='2016-11', features='pct_lag1,slope4,moy'):
"""This function should use sklearn to learn, predict."""
linr_model = skl.LinearRegression()
xtrain_a, ytrain_a, xtest_a, out_df = pgdb.get_train_test(tkr,yrs,mnth,features)
if ((xtrain_a.size == 0) or (ytrain_a.size == 0) or (xtest_a.size == 0)):
return out_df # probably empty too.
# I should fit a model to xtrain_a, ytrain_a
linr_model.fit(xtrain_a,ytrain_a)
# I should predict xtest_a then update out_df
out_df['prediction'] = np.round(linr_model.predict(xtest_a),3).tolist()
out_df['effectiveness'] = np.sign(out_df.pct_lead*out_df.prediction)*np.abs(out_df.pct_lead)
out_df['accuracy'] = (1+np.sign(out_df.effectiveness))/2
algo = 'sklinear'
kmodel = None # sklearn has no kmodel, keras does.
# I should save work to the db:
pgdb.predictions2db(tkr,yrs,mnth,features,algo,out_df,kmodel)
return out_df
def learn_predict_sklinear_yr(tkr='ABC',yrs=20,yr=2016, features='pct_lag1,slope4,moy'):
"""This function should use sklearn to learn and predict for a year."""
empty_df = pd.DataFrame()
yr_l = [empty_df, empty_df] # Ready for pd.concat()
# I should rely on monthy predictions:
for mnth_i in range(1,13):
mnth_s = str(mnth_i).zfill(2)
mnth = str(yr)+'-'+mnth_s
m_df = learn_predict_sklinear(tkr,yrs,mnth, features)
yr_l.append(m_df)
# I should gather the monthy predictions:
yr_df = pd.concat(yr_l, ignore_index=True)
return yr_df
def learn_predict_sklinear_tkr(tkr='ABC',yrs=20, features='pct_lag1,slope4,moy'):
"""This function should use sklearn to learn and predict for a tkr."""
# From db, I should get a list of all months for tkr:
mnth_l = pgdb.getmonths4tkr(tkr,yrs)
# I should rely on monthy predictions:
empty_df = pd.DataFrame()
tkr_l = [empty_df, empty_df] # Ready for pd.concat()
for mnth_s in mnth_l:
m_df = learn_predict_sklinear(tkr,yrs,mnth_s, features)
tkr_l.append(m_df)
# I should gather the monthy predictions:
tkr_df = pd.concat(tkr_l, ignore_index=True)
return tkr_df
'bye'
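# Editor's note (hypothetical usage; requires the project's database via
# api.pgdb, and 'IBM' is just a placeholder ticker):
#   month_df = learn_predict_sklinear(tkr='IBM', yrs=20, mnth='2016-11')
#   year_df = learn_predict_sklinear_yr(tkr='IBM', yrs=20, yr=2016)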
|
[
"verystrongjoe@gmail.com"
] |
verystrongjoe@gmail.com
|
c36737d70f36cd5e2a9ae0a8957a169c2ed18c6c
|
8417564be258bf1b8ed1c75cf8cdbcea346239b1
|
/venv/bin/pip3.5
|
26454f64bd409142cbde634ad3a4aa6107b1ac24
|
[] |
no_license
|
rdahal35/djangoproject
|
abebe56f7e5be2a45fb656b030aed2b9c9c6e201
|
8aa0d967018d6eb4097235135661cda2c46d67a3
|
refs/heads/master
| 2021-09-09T23:02:17.107653
| 2018-03-20T05:40:26
| 2018-03-20T05:40:26
| 125,965,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
5
|
#!/home/rupesh/python/django/yetanotherproject/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"rdahal35@gmail.com"
] |
rdahal35@gmail.com
|
cbb1f4d634b5c2931e92070446640ee4d89d33d8
|
efd55bc63da8ab6ee964ec82bd0b761fd36107cc
|
/leetcode/number-of-enclaves.py
|
ee0c5737871c26a66fb05b99c26e3ff45578e1bb
|
[] |
no_license
|
gsantam/competitive-programming
|
f9a2c9999470eeae9ef4aada6af43b91a65fcb50
|
0b208516a6ae3e72bc7b79ef0ac83dcbfa100496
|
refs/heads/master
| 2021-06-20T23:27:30.274275
| 2021-06-20T19:44:51
| 2021-06-20T19:44:51
| 162,201,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
from typing import List

class Solution:
def numEnclaves(self, A: List[List[int]]) -> int:
visited = set()
numEnclaves = 0
for i in range(len(A)):
for j in range(len(A[0])):
if (i,j) not in visited and A[i][j]==1:
can_visit_boundary = False
stack = [(i,j)]
total_lands = 0
while len(stack)>0:
element = stack.pop()
x = element[0]
y = element[1]
if element not in visited and A[x][y]==1:
total_lands+=1
visited.add(element)
if x+1>=len(A):
can_visit_boundary = True
else:
stack.append((x+1,y))
if y+1>=len(A[0]):
can_visit_boundary = True
else:
stack.append((x,y+1))
if x-1<0:
can_visit_boundary = True
else:
stack.append((x-1,y))
if y-1<0:
can_visit_boundary = True
else:
stack.append((x,y-1))
if not can_visit_boundary:
numEnclaves+=total_lands
return numEnclaves
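# Editor's note: on the LeetCode 1020 sample grid, the flood fill above counts
# the three land cells that cannot walk off the boundary:
#   Solution().numEnclaves([[0,0,0,0],[1,0,1,0],[0,1,1,0],[0,0,0,0]])  ->  3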
|
[
"santamaria.guille@gmail.com"
] |
santamaria.guille@gmail.com
|
f472551f1d884a042278fd5068b8812e440a9674
|
f73bcada5ab8432d2af07b5cb7fd7a38109d3e3a
|
/.history/parser_20201108183309.py
|
ccfb07d5c586abb5736efc84264c3f2c979c39ba
|
[] |
no_license
|
mariajbp/gedcomparser
|
837bf4ae5628a81e535d233c7c35313c6d86d78c
|
6fc55899e5a82c4071991ab94a344b64c014b84d
|
refs/heads/master
| 2023-01-23T09:01:27.459597
| 2020-11-19T23:58:53
| 2020-11-19T23:58:53
| 310,900,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,988
|
py
|
#!/usr/bin/python3
#python3 parser.py input/bible.gedcom > test.txt
import sys
from re import *
filename = sys.argv[1].split('/')[1]
assetPath = "assets"
indPath = "individuals"
famPath = "families"
cssPath = "assets/gedcom.css"
def createIndi(ik,iv):
f = open('assets/individuals/'+ik+'.html', 'w')
f.write('<h4> <a href=\"../index.html\"> return to index </a> </h4>')
f.write('<!DOCTYPE html><html><head> <link rel="stylesheet" type="text/css" href="../index.css"></head>\n')
f.write('<h1> Código do individuo: ' + ik + '</h1>')
for k, v in iv.items():
f.write('<b>'+str(k) + ':</b> '+ str(v) + '\n')
f.close()
def createFamily(fk,fi):
f = open('assets/families/'+fk+'.html', 'w')
f.write('<h4> <a href=\"../index.html\"> return to index </a> </h4>')
f.write('<!DOCTYPE html><html><head> <link rel="stylesheet" type="text/css" href="../index.css"></head>\n')
f.write('<h1> Código da familia: ' + fk + '</h1>')
for k, v in fi.items():
f.write('<b>'+str(k) + ':</b> '+ str(v) +'\r\n')
f.close()
def createIndex(fam,indi):
f = open("assets/index.html", 'w')
f.write('<!DOCTYPE html><html><head> <link rel="stylesheet" type="text/css" href="index.css"></head>\n')
f.write('<h1> Ficheiro: ' + filename + '</h1>')
f.write('<div class="row"><div class="column"><h2>Familias</h2>')
for keyf in fam:
f.write('<li> <a href=\"'+famPath+'/'+keyf+'.html\">'+keyf+'</a></li>\n')
f.write('</ul> </div>')
f.write('<div class="column"><h2>Individuos</h2>')
for keyi in indi:
f.write('<li> <a href=\"'+indPath+'/'+keyi+'.html\">'+keyi+'</a></li>\n')
f.write('</ul></div></div>')
f.close()
# build an individual and its characteristics
BG = {}
def procIndi(s,i):
indi = {}
name = search(r'\bNAME\s+(.*)', i)
title = search(r'\bTITL\s+(.*)', i)
gender = search(r'\bSEX\s+(.*)', i)
if name:
        indi['Name'] = name.group(1)
        indi['Families'] = findall(r'\bFAMS\s+@(.*)@', i)  # likely intent; the original overwrote `name` and discarded the result
if title:
indi['Title'] = title.group(1)
if gender:
indi['Gender'] = gender.group(1)
BG[s] = indi
BF = {}
def procFam(f,i):
fam={}
h = search(r'\bHUSB\s+@(.*)@',i)
if h:
fam['Husband'] = h.group(1)
w = search(r'\bWIFE\s+@(.*)@',i)
if w:
fam['Wife'] = w.group(1)
fam['Children'] = findall (r'\bCHIL\s+@(.*)@',i)
BF[f] = fam
    print(fam.get('Husband'))  # debug print; .get avoids a KeyError when no HUSB record exists
def process(t):
items = split(r'\n0',t)
for i in items:
        z = search(r'@(I\d+)@ *INDI', i) # find all individuals
if z:
procIndi(z.group(1),i)
        f = search(r'@(F\d+)@ *FAM', i) # find all families
if f:
procFam(f.group(1),i)
with open(sys.argv[1], 'r') as f :
gedcom = f.read()
process(gedcom)
createIndex(BF.keys(), BG.keys())
for k,v in BF.items():
createFamily(k,v)
for k,v in BG.items():
createIndi(k,v)
|
[
"mariajbp00@gmail.com"
] |
mariajbp00@gmail.com
|
a9710d3e6cebed174b49ca7389a2ff5cedf15dbf
|
33c23cb18917d6b1255fa45a4f1944f1774fdb99
|
/scripts/local_lcs_pam_250.py
|
0b29540cf2ff0ac610c6d0b89c4448edfd3265f0
|
[] |
no_license
|
sjuvekar/Bioinformatics
|
ff0c0f4d4b77c322ce59cd98ae0036d71305710f
|
97bf341f2b8b63b7eba78e736be6703a2f651e90
|
refs/heads/master
| 2020-05-17T00:18:18.056611
| 2013-12-11T02:46:34
| 2013-12-11T02:46:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
#!/usr/bin/env python
from util.lcs_util import LCSUtil
import sys
if __name__ == "__main__":
f = open(sys.argv[1])
dna = LCSUtil(f.readline().strip())
dna.parse_score_matrix("../matrices/PAM250.txt")
other_dna = LCSUtil(f.readline().strip())
(best_score, seq1, seq2) = dna.graph_based_local_alignment(other_dna, 5, True)
print best_score
print seq1
print seq2
|
[
"sjuvekar@gmail.com"
] |
sjuvekar@gmail.com
|
a2785bd0b41bd5bb7bb9d3b20b2e2922d476bae4
|
a9f38bb28ff9bd04b151d86c653cde9f46768c7c
|
/medium/validateBST.py
|
ce4596ddfbf18317b96c151cb041324fd72f9669
|
[] |
no_license
|
Xynoclafe/leetcode
|
02388516b10b8ee6bec6ee1b91ab5681c3254d33
|
4a80f02683e7fc14cb49c07170651ea3eeb280ac
|
refs/heads/master
| 2020-12-01T21:05:44.656581
| 2020-02-02T09:05:32
| 2020-02-02T09:05:32
| 230,770,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isValidBST(self, root: TreeNode) -> bool:
def inOrderT(root, inOrder):
if root == None:
return
inOrderT(root.left, inOrder)
inOrder.append(root.val)
inOrderT(root.right, inOrder)
inOrder = []
inOrderT(root, inOrder)
for i in range(len(inOrder) - 1):
if inOrder[i] >= inOrder[i + 1]:
return False
return True
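# Editor's note (illustrative): the in-order traversal above must be strictly
# increasing, so a tree with root 2 and children 1, 3 gives inOrder == [1, 2, 3]
# and returns True, while duplicate values (e.g. root 1 with left child 1)
# correctly return False.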
|
[
"gokulprem.94@gmail.com"
] |
gokulprem.94@gmail.com
|
1323bf6ec05675b2462147830b71f4051ac71fc7
|
1dc67a30f9af553243088668d51bc4e75e87d83d
|
/pythonNet/udp_client.py
|
2d2eeb35ecd220d4b54e3db4f2dd9be943735345
|
[] |
no_license
|
houyinhu/AID1812
|
00db45b3e8905bd069b31f2e7689f83bca3fa61f
|
8eeb9f06ed9f4e742d480354ef0e336dfe8c2f17
|
refs/heads/master
| 2020-04-27T16:33:57.275890
| 2019-04-10T01:09:51
| 2019-04-10T01:09:51
| 174,486,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
from socket import *
# Server address
host = '127.0.0.1'
port = 9999
addr = (host,port)
# Create a UDP socket
sockfd = socket(AF_INET,SOCK_DGRAM)
# Send and receive messages
while True:
    # Send a message
data = input("Msg>>")
if not data:
break
sockfd.sendto(data.encode(),addr)
    # Receive the reply
msg,addr = sockfd.recvfrom(1024)
print("Receive from server:",msg.decode())
sockfd.close()
|
[
"ahu@163.com"
] |
ahu@163.com
|
a39c4901cd3ea3b970eac16df6e9449edf83b9bf
|
ed0780889408c9968f3c987fbace61aa11770ba1
|
/rythmize/__init__.py
|
7fbe776402cfcb70ac2e582492b9d2840232a719
|
[] |
no_license
|
maleksal/rythmize-api
|
b45af58c594e882dbbe248a479d6f88064332cf4
|
6d538a7eae617c32b5405c8c92f1cd4f7f42ce3c
|
refs/heads/main
| 2023-07-18T18:40:54.718307
| 2021-09-13T14:14:17
| 2021-09-13T14:14:17
| 304,356,537
| 5
| 1
| null | 2020-11-04T13:43:10
| 2020-10-15T14:43:15
|
Python
|
UTF-8
|
Python
| false
| false
| 875
|
py
|
"""
Create flask application.
"""
import os
from flask import Flask
from .admin import admin_settings
from .api.v1.views import api_views
from .extensions import cors, db, guard, ma, mail
from .models.user import User
def create_app(config_env):
"""Initiate app using Flask Factory Pattern."""
app = Flask(__name__)
app.config.from_object(config_env)
# Initialize extentions
db.init_app(app) # Database
    ma.init_app(app)  # Serializer && deserializer extension
guard.init_app(app, User) # Flask-praetorian
cors.init_app(app) # Flask-cors
mail.init_app(app) # Flask-Mail
# setup admin panel
admin_settings.init_app(app)
admin_settings.name = 'rythmize-panel'
admin_settings.template_mode = 'bootstrap3'
# register routes
app.register_blueprint(api_views)
return app
|
[
"malek.salem.14@gmail.com"
] |
malek.salem.14@gmail.com
|
433a20c2b321ceee12bcc5ba041c9f6638a6c4b4
|
e2a6cc522daca1a0060644fcc487b684a0849c34
|
/ecommerce_project/login_app/forms.py
|
ad20e0f8b1d8ef135c0e16ae8659bb72fb9fe0b8
|
[] |
no_license
|
Tanzin-Ul-Islam/Django_Ecommerce_sslcommerz
|
0c341ab0f045479a8cd7fce6a736ebfa62db55c4
|
fdec49a5c4ed7e943d4e7c8778c8f254117f87cd
|
refs/heads/main
| 2023-02-08T17:19:11.283296
| 2020-12-29T22:02:31
| 2020-12-29T22:02:31
| 321,987,204
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
from django.forms import ModelForm
from login_app.models import User, Profile
from django.contrib.auth.forms import UserCreationForm
class ProfileForm(ModelForm):
class Meta:
model = Profile
exclude = ('user',)
class SignUpForm(UserCreationForm):
class Meta:
model = User
fields = ('email', 'password1', 'password2',)
|
[
"tanzin.cse@gmail.com"
] |
tanzin.cse@gmail.com
|
dd8412c8a6aece59b5d524a5c1c11c537bc38c52
|
fc29ccdcf9983a54ae2bbcba3c994a77282ae52e
|
/Leetcode/325-presum.py
|
c3e5f9dbbe27aeaad335dcc81cdb202360b980a3
|
[] |
no_license
|
linnndachen/coding-practice
|
d0267b197d9789ab4bcfc9eec5fb09b14c24f882
|
5e77c3d7a0632882d16dd064f0aad2667237ef37
|
refs/heads/master
| 2023-09-03T19:26:25.545006
| 2021-10-16T16:29:50
| 2021-10-16T16:29:50
| 299,794,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
from typing import List
class Solution:
def maxSubArrayLen(self, nums: List[int], k: int) -> int:
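        # Prefix-sum + hashmap: memo keeps the earliest index at which each
        # prefix sum occurs, so a subarray summing to k ends at idx whenever
        # cur_sum - k has been seen before.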
memo = {}
res = 0
cur_sum = 0
memo[cur_sum] = -1
for idx, val in enumerate(nums):
cur_sum += val
if cur_sum - k in memo:
res = max(res, idx - memo[cur_sum - k])
if cur_sum not in memo:
memo[cur_sum] = idx
return res
|
[
"lchen.msc2019@ivey.ca"
] |
lchen.msc2019@ivey.ca
|
b7a63f283bc4352d3165fd8aae7a005711aa608d
|
c97b9ae1bf06757ba61f90905e4d9b9dd6498700
|
/venv/Lib/site-packages/tensorflow/core/protobuf/data/experimental/snapshot_pb2.py
|
6ff5b17b88d56e9937049ac3adbb5748da83bf01
|
[] |
no_license
|
Rahulk1p/image-processor
|
f7ceee2e3f66d10b2889b937cdfd66a118df8b5d
|
385f172f7444bdbf361901108552a54979318a2d
|
refs/heads/main
| 2023-03-27T10:09:46.080935
| 2021-03-16T13:04:02
| 2021-03-16T13:04:02
| 348,115,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:6ddffc53da063702a6f6211385bdd679c42227bb1dd87be76feeacb69fc30bb6
size 10684
|
[
"rksc.k1p@gmail.com"
] |
rksc.k1p@gmail.com
|
583cfa8145a469e7dbf7f5fa01e42d36462ea762
|
c5d6e21744f10c6e57d58b57bba2763b82a9726b
|
/Bimestre_04_Aula_04/02_letras.py
|
bed4bf61752730b5ca1c49735df58909d70d1e6b
|
[] |
no_license
|
valeriacavalcanti/ALP-2020-R
|
bf32af707d49db650deb6d122a1abdf58d94ae4f
|
62e0be861ad7439b99ae5d0b0e14d97c887424c7
|
refs/heads/main
| 2023-05-05T02:05:00.128872
| 2021-06-04T10:30:05
| 2021-06-04T10:30:05
| 316,784,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
frase = input('Frase: ')
letras = []
for s in frase:
if ((s >= 'a' and s <= 'z') or (s >= 'A' and s <= 'Z')) and (s not in letras):
letras.append(s)
print(letras)
|
[
"valeria.cavalcanti@ifpb.edu.br"
] |
valeria.cavalcanti@ifpb.edu.br
|
e922ca8c459ceb36f136a75e1fe68b947faf0553
|
90d3af65fc9900f2abb7eaa7631646856e115da3
|
/COMP9021/challenge/merge_strings.py
|
09d6d075032614f7aafc9a7756415ff50498952b
|
[] |
no_license
|
Tim-hyx/UNSW-Courses
|
d414b79b6c5b428be12456ba85e1757ac871535b
|
b7031ea9ac833b5a396e7938ef73cc335a2e37b7
|
refs/heads/main
| 2023-07-10T19:48:34.731340
| 2021-08-10T02:39:14
| 2021-08-10T02:39:14
| 300,894,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
# Written by Eric Martin for COMP9021
def can_merge(string_1, string_2, string_3):
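    # True iff string_3 can be formed by interleaving string_1 and string_2
    # while preserving the order of characters within each string.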
if not string_1:
return string_2 == string_3
if not string_2:
return string_1 == string_3
if string_1[0] == string_3[0]\
and can_merge(string_1[1 :], string_2, string_3[1 :]):
return True
if string_2[0] == string_3[0]:
return can_merge(string_1, string_2[1 :], string_3[1 :])
return False
def report_failure():
print('No string can be merged from the other two.')
ranks = 'first', 'second', 'third'
shortest, in_between, longest =\
sorted(zip(ranks,
(input(f'Please input the {rank} string: ') for rank in ranks)
), key=lambda x: len(x[1])
)
if not longest[1]:
print('Any string can be obtained from the other two.')
elif not shortest[1]:
if in_between[1] == longest[1]:
print(f'The {in_between[0]} and {longest[0]} strings can be obtained '
'by merging the other two.'
)
else:
report_failure()
elif len(longest[1]) != len(shortest[1]) + len(in_between[1])\
or not can_merge(shortest[1], in_between[1], longest[1]):
report_failure()
else:
print(f'The {longest[0]} string can be obtained by merging the other two.')
|
[
"noreply@github.com"
] |
Tim-hyx.noreply@github.com
|
510d52809fb94163286af1dd16c5f0d892dc29df
|
3b98ee18977177e10b57e6162a03204e3774d3b8
|
/Kirk_Byers_Nornir_Automation/env/lib/python3.8/site-packages/nornir_napalm/plugins/tasks/__init__.py
|
f48b5b83218e80cf28db8c5fb893eb17186ed306
|
[] |
no_license
|
mattmiller87/practice
|
0a3d1cae1283abb683dfab0af86e6c569a6104e1
|
9655a8020038e0f6dfe8df842867debac0fcb1e3
|
refs/heads/master
| 2022-06-23T23:47:50.350379
| 2022-06-14T13:30:51
| 2022-06-14T13:38:56
| 51,970,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
from .napalm_cli import napalm_cli
from .napalm_configure import napalm_configure
from .napalm_get import napalm_get
from .napalm_ping import napalm_ping
from .napalm_validate import napalm_validate
__all__ = (
"napalm_cli",
"napalm_configure",
"napalm_get",
"napalm_ping",
"napalm_validate",
)
|
[
"mattmiller87@gmail.com"
] |
mattmiller87@gmail.com
|
1684baab9978a790c1a1abaa5ba07d46c9297150
|
1798bed996931a9e7b6c9a469f86e24589fa9cf0
|
/huxley/api/tests/test_committee.py
|
14041809495b2be4aecd91ede104035ddcb95cdd
|
[
"BSD-3-Clause"
] |
permissive
|
joannejqi/huxley
|
8eae38af706b4f5d714736c99741541d2c3aae73
|
b4b5cac213c9605599900eca8ed0225086a5cf4c
|
refs/heads/master
| 2020-05-29T08:41:22.712756
| 2016-10-02T22:18:12
| 2016-10-02T22:18:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,822
|
py
|
# Copyright (c) 2011-2015 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
from huxley.api import tests
from huxley.api.tests import auto
from huxley.utils.test import TestCommittees, TestUsers
class CommitteeDetailGetTestCase(auto.RetrieveAPIAutoTestCase):
url_name = 'api:committee_detail'
@classmethod
def get_test_object(cls):
return TestCommittees.new_committee()
def test_anonymous_user(self):
self.do_test()
class CommitteeDetailPutTestCase(tests.UpdateAPITestCase):
url_name = 'api:committee_detail'
params = {'name':'DISC',
'special':True}
def setUp(self):
self.committee = TestCommittees.new_committee()
def test_anonymous_user(self):
'''Unauthenticated users shouldn't be able to update committees.'''
response = self.get_response(self.committee.id, params=self.params)
self.assertMethodNotAllowed(response, 'PUT')
def test_authenticated_user(self):
'''Authenticated users shouldn't be able to update committees.'''
TestUsers.new_user(username='user', password='user')
self.client.login(username='user', password='user')
response = self.get_response(self.committee.id, params=self.params)
self.assertMethodNotAllowed(response, 'PUT')
def test_superuser(self):
'''Superusers shouldn't be able to update committees.'''
TestUsers.new_superuser(username='user', password='user')
self.client.login(username='user', password='user')
response = self.get_response(self.committee.id, params=self.params)
self.assertMethodNotAllowed(response, 'PUT')
class CommitteeDetailPatchTestCase(tests.PartialUpdateAPITestCase):
url_name = 'api:committee_detail'
params = {'name':'DISC',
'special':True}
def setUp(self):
self.committee = TestCommittees.new_committee()
def test_anonymous_user(self):
'''Unauthenticated users shouldn't be able to update committees.'''
response = self.get_response(self.committee.id, params=self.params)
self.assertMethodNotAllowed(response, 'PATCH')
def test_authenticated_user(self):
'''Authenticated users shouldn't be able to update committees.'''
TestUsers.new_user(username='user', password='user')
self.client.login(username='user', password='user')
response = self.get_response(self.committee.id, params=self.params)
self.assertMethodNotAllowed(response, 'PATCH')
def test_superuser(self):
'''Superusers shouldn't be able to update committees.'''
TestUsers.new_superuser(username='user', password='user')
self.client.login(username='user', password='user')
response = self.get_response(self.committee.id, params=self.params)
self.assertMethodNotAllowed(response, 'PATCH')
class CommitteeDetailDeleteTestCase(auto.DestroyAPIAutoTestCase):
url_name = 'api:committee_detail'
@classmethod
def get_test_object(cls):
return TestCommittees.new_committee()
def test_anonymous_user(self):
'''Anonymous users cannot delete committees.'''
self.do_test(expected_error=auto.EXP_DELETE_NOT_ALLOWED)
def test_authenticated_user(self):
'''Authenticated users cannot delete committees.'''
TestUsers.new_user(username='user', password='user')
self.do_test(
username='user', password='user',
expected_error=auto.EXP_DELETE_NOT_ALLOWED)
def test_superuser(self):
'''Superusers cannot delete committees.'''
TestUsers.new_superuser(username='user', password='user')
self.do_test(
username='user', password='user',
expected_error=auto.EXP_DELETE_NOT_ALLOWED)
class CommitteeListGetTestCase(tests.ListAPITestCase):
url_name = 'api:committee_list'
def test_anonymous_user(self):
'''Anyone should be able to access a list of all the committees.'''
c1 = TestCommittees.new_committee(name='DISC', delegation_size=100)
c2 = TestCommittees.new_committee(name='JCC', special=True,
delegation_size=30)
response = self.get_response()
self.assertEqual(response.data, [
{'delegation_size': c1.delegation_size,
'special': c1.special,
'id': c1.id,
'full_name': c1.full_name,
'name': c1.name},
{'delegation_size': c2.delegation_size,
'special': c2.special,
'id': c2.id,
'full_name': c2.full_name,
'name': c2.name}])
class CommitteeListPostTestCase(tests.CreateAPITestCase):
url_name = 'api:committee_list'
params = {'name': 'DISC',
'full_name': 'Disarmament and International Security',
'delegation_size': 100}
def test_anonymous_user(self):
'''Unauthenticated users shouldn't be able to create committees.'''
response = self.get_response(self.params)
self.assertMethodNotAllowed(response, 'POST')
def test_authenticated_user(self):
'''Authenticated users shouldn't be able to create committees.'''
TestUsers.new_user(username='user', password='user')
self.client.login(username='user', password='user')
response = self.get_response(self.params)
self.assertMethodNotAllowed(response, 'POST')
def test_superuser(self):
'''Superusers shouldn't be able to create committees.'''
TestUsers.new_superuser(username='user', password='user')
self.client.login(username='user', password='user')
response = self.get_response(self.params)
self.assertMethodNotAllowed(response, 'POST')
|
[
"k.mehta@berkeley.edu"
] |
k.mehta@berkeley.edu
|
acf4f07c5b846474dd3390e18f33eb6453daf203
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/dominator_20200827125530.py
|
4033c37b5502fead050650b44dcd3b8bd2988b5c
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
def leader(A):
    # create a dictionary of counts; the leader is the element that occurs
    # more than n//2 times, and we return the index of its first occurrence
    store = {}
    candidate = None
    for i in A:
        if i in store:
            store[i] += 1
        else:
            store[i] = 1
    for i in store:
        if store[i] > (len(A) // 2):
            candidate = i
    for k in range(len(A)):
        if A[k] == candidate:
            return k
    return -1

print(leader([3, 4, 3, 2, 3, -1, 3, 3]))
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
0fe855a596925a8feaac09b26bd6830c252b6375
|
d996edcd595c565c5725a16286ce8d338af67246
|
/src/rl/environments/bandit.py
|
40ded8fedcd9633048d32e37660a86d830ceaa5b
|
[] |
no_license
|
preddy5/dltemplate
|
fbbfce7660c451495e255cf8d8437e4b4e207f9c
|
77b04b767cbd4914e0a3d3609c645e475aabcc43
|
refs/heads/master
| 2020-04-28T19:37:04.893001
| 2019-03-13T13:35:04
| 2019-03-13T13:35:04
| 175,517,056
| 1
| 1
| null | 2019-03-13T23:59:40
| 2019-03-13T23:59:39
| null |
UTF-8
|
Python
| false
| false
| 3,429
|
py
|
"""
From https://courses.edx.org/courses/course-v1:Microsoft+DAT257x+2T2018/course/ (errors and redundant code included ;)
"""
import numpy as np
import sys
# Interface
class Environment(object):
def reset(self):
raise NotImplementedError('Subclasses must override reset.')
def actions(self):
raise NotImplementedError('Subclasses must override actions.')
def step(self, action):
raise NotImplementedError('Subclasses must override step.')
class ActionSpace(object):
def __init__(self, actions):
self.actions = actions
self.n = len(actions)
# BanditEnv Environment
class BanditEnv(Environment):
    def __init__(self, n_actions=10, distribution='bernoulli', evaluation_seed=387):
super(BanditEnv, self).__init__()
self.action_space = ActionSpace(range(n_actions))
self.distribution = distribution
        np.random.seed(evaluation_seed)
self.is_reset = False
self.reward_parameters = None
if distribution == 'bernoulli':
self.reward_parameters = np.random.rand(n_actions)
elif distribution == 'normal':
self.reward_parameters = (np.random.randn(n_actions), np.random.rand(n_actions))
elif distribution == 'heavy-tail':
self.reward_parameters = np.random.rand(n_actions)
else:
print('Please use a supported reward distribution', flush=True)
sys.exit(0)
        if distribution == 'normal':
            self.optimal_arm = np.argmax(self.reward_parameters[0])
        else:
            self.optimal_arm = np.argmax(self.reward_parameters)
def reset(self):
self.is_reset = True
def actions(self):
return range(self.action_space.n)
def compute_gap(self, action):
if self.distribution == 'normal':
gap = np.abs(self.reward_parameters[0][self.optimal_arm] - self.reward_parameters[0][action])
else:
gap = np.abs(self.reward_parameters[self.optimal_arm] - self.reward_parameters[action])
return gap
def step(self, action):
self.is_reset = False
valid_action = True
reward = 0
# gap = 0
if action is None or action < 0 or action >= self.action_space.n:
print('Algorithm chose an invalid action; reset reward to -inf', flush=True)
reward = float('-inf')
# gap = float('inf')
valid_action = False
if self.distribution == 'bernoulli':
if valid_action:
reward = np.random.binomial(1, self.reward_parameters[action])
# gap = self.reward_parameters[self.optimal_arm] - self.reward_parameters[action]
elif self.distribution == 'normal':
if valid_action:
reward = self.reward_parameters[0][action] + self.reward_parameters[1][action] * np.random.randn()
# gap = self.reward_parameters[0][self.optimal_arm] - self.reward_parameters[0][action]
        elif self.distribution == 'heavy-tail':
if valid_action:
reward = self.reward_parameters[action] + np.random.standard_cauchy()
# gap = self.reward_parameters[self.optimal_arm] - self.reward_parameters[action]
else:
print('Please use a supported reward distribution', flush=True)
sys.exit(0)
return None, reward, self.is_reset, ''
|
[
"markmo@me.com"
] |
markmo@me.com
|
a89a60b286af90a01447471f18579f1512a3c20b
|
a8314fb4e71a229f2288ca0588bbb3ebd58b7db0
|
/leet/merge_two_sorted_lists/main.py
|
810455adabdede95f2ff4ba1f28c025393049b90
|
[] |
no_license
|
blhwong/algos_py
|
6fc72f1c15fe04f760a199535a0df7769f6abbe6
|
9b54ad6512cf0464ecdd084d899454a99abd17b2
|
refs/heads/master
| 2023-08-30T17:45:51.862913
| 2023-07-24T18:56:38
| 2023-07-24T18:56:38
| 264,782,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
from data_structures.list_node import ListNode
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
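        # Two-pointer merge: repeatedly append the smaller current value to the
        # tail, then splice on whatever remains of the unfinished list.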
curr1 = l1
curr2 = l2
head = None
tail = None
def add_to_tail(val):
nonlocal head
nonlocal tail
if not head:
head = ListNode(val)
tail = head
else:
if not head.next:
head.next = tail
tail.next = ListNode(val)
tail = tail.next
while curr1 and curr2:
if curr1.val < curr2.val:
add_to_tail(curr1.val)
curr1 = curr1.next
else:
add_to_tail(curr2.val)
curr2 = curr2.next
if curr1:
if tail:
tail.next = curr1
else:
return curr1
elif curr2:
if tail:
tail.next = curr2
else:
return curr2
return head
|
[
"brandon@yerdle.com"
] |
brandon@yerdle.com
|
88748958f155fdf6f5309640b3a89f748bad225e
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_MovingAverage_Seasonal_Second_SVR.py
|
14fcfd600f9d3e0f16af66b34160eb3958b35e58
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 163
|
py
|
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['MovingAverage'] , ['Seasonal_Second'] , ['SVR'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
712e14f4ca6830112d3e199b7a2dddaf97f50512
|
9cc1b58d0319308da98187d071295b2fabf1f080
|
/0608/a0608_03_matplotlib模組試用2.py
|
97333bfaef57ef352a284ad83d82ab346c07fd2f
|
[
"MIT"
] |
permissive
|
Arwen0905/Python_Test
|
60d1dee383c9cf27df6b93cfde7884c91092229c
|
c75357e4354a684a9fae41f751dae60d4cf0716c
|
refs/heads/master
| 2023-01-13T13:14:55.355898
| 2020-10-31T18:52:07
| 2020-10-31T18:52:07
| 265,150,874
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 912
|
py
|
import matplotlib.pyplot as plt
import random
# list_x1 = [1,5,7,9,13,16]
# list_y1 = [15,50,80,40,70,50]
# list_x2 = [2,6,8,11,14,16]
# list_y2 = [10,40,30,50,80,60]
list_x1 = []
list_y1 = []
list_x2 = []
list_y2 = []
for i in range(4):
list_x1.append(random.randint(1,50))
list_y1.append(random.randint(1,50))
for i in range(6):
list_x2.append(random.randint(1,50))
list_y2.append(random.randint(1,50))
plt.plot(list_x1, list_y1,color="#ffff55",linewidth="5",\
linestyle="-.",label=list_x2)
plt.plot(list_x2, list_y2, color="#ff2244",linewidth=5,\
linestyle=":",label=list_y2)
# show the legend
plt.legend()
# plt.xlim(0,18)  # x-axis display range
# plt.ylim(0,120)  # y-axis display range
plt.title("Pocket Money")  # title above the chart
plt.xlabel("Age")  # x-axis label
plt.ylabel("Money")  # y-axis label
plt.gca().set_facecolor('black')  # background color setting
plt.show()
|
[
"qq23378452@gmail.com"
] |
qq23378452@gmail.com
|
3f9e0de70db659c1bc01acd9909894dc74a0c1a2
|
a87eed5b49858ee547c2363a9d29a5c625db254f
|
/examples/log_requests.py
|
035cc4e95d5243acbcfef0c810012b8089ee290d
|
[
"BSD-2-Clause"
] |
permissive
|
parkerhancock/requests-cache
|
3a58d8829eba27796dd551d98d93237f24fd2179
|
e3ae526cba37a4ea2d8a48b05aaeff062847c644
|
refs/heads/master
| 2023-05-31T11:59:29.964100
| 2021-07-09T21:01:11
| 2021-07-09T21:01:11
| 371,452,485
| 1
| 0
|
BSD-2-Clause
| 2021-07-09T14:03:50
| 2021-05-27T17:25:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
#!/usr/bin/env python3
"""
An example of testing the cache to prove that it's not making more requests than expected.
"""
from contextlib import contextmanager
from logging import basicConfig, getLogger
from unittest.mock import patch
import requests
from requests_cache import CachedSession
from requests_cache.session import OriginalSession, set_response_defaults
basicConfig(level='INFO')
logger = getLogger('requests_cache.examples')
# Uncomment for more verbose debug output
# getLogger('requests_cache').setLevel('DEBUG')
@contextmanager
def log_requests():
"""Context manager that mocks and logs all non-cached requests"""
real_response = set_response_defaults(requests.get('http://httpbin.org/get'))
with patch.object(OriginalSession, 'send', return_value=real_response) as mock_send:
session = CachedSession('cache-test', backend='sqlite')
session.cache.clear()
yield session
cached_responses = session.cache.responses.values()
logger.debug('All calls to Session._request():')
logger.debug(mock_send.mock_calls)
logger.info(f'Responses cached: {len(cached_responses)}')
logger.info(f'Requests sent: {mock_send.call_count}')
def main():
"""Example usage; replace with any other requests you want to test"""
with log_requests() as session:
for i in range(10):
response = session.get('http://httpbin.org/get')
logger.debug(f'Response {i}: {type(response).__name__}')
if __name__ == '__main__':
main()
|
[
"jordan.cook@pioneer.com"
] |
jordan.cook@pioneer.com
|
85ed8f1963ad348e607ad90fca7242976a2638a6
|
eb99769b7c9e0eb1cf3b88878934a400ba42f0bf
|
/users/migrations/0002_auto_20180614_1023.py
|
7649e59641c40a952777371d5d47701aa6f2a3bf
|
[] |
no_license
|
Levalife/petsterr2.0
|
3657b200b9e236b81896f4ac104932e85517ceb3
|
43d20e65362596d72942fe624c29fd4f84d90f9a
|
refs/heads/master
| 2023-01-13T04:58:23.496527
| 2018-09-13T09:50:48
| 2018-09-13T09:50:48
| 203,134,329
| 0
| 0
| null | 2023-01-05T21:55:18
| 2019-08-19T08:48:32
|
Python
|
UTF-8
|
Python
| false
| false
| 3,743
|
py
|
# Generated by Django 2.0.6 on 2018-06-14 10:23
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('countries', '0003_auto_20180614_1023'),
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='api_key',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='userprofile',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='countries.Country'),
),
migrations.AddField(
model_name='userprofile',
name='date_of_birth',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='userprofile',
name='facebook_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='userprofile',
name='gender',
field=models.CharField(blank=True, choices=[('male', 'male'), ('female', 'female')], max_length=255, null=True),
),
migrations.AddField(
model_name='userprofile',
name='google_access_key',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='userprofile',
name='locale',
field=models.CharField(blank=True, default='en', max_length=10, null=True),
),
migrations.AddField(
model_name='userprofile',
name='phone_number',
field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
),
migrations.AddField(
model_name='userprofile',
name='picture',
field=models.ImageField(blank=True, null=True, upload_to='users/pictures/%Y/%m/%d'),
),
migrations.AddField(
model_name='userprofile',
name='premium',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='userprofile',
name='referral_code',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='userprofile',
name='timezone',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='userprofile',
name='twitter_access_key',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='userprofile',
name='twitter_access_secret',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='userprofile',
name='twitter_id',
field=models.CharField(blank=True, max_length=255, null=True, unique=True),
),
migrations.AddField(
model_name='userprofile',
name='user_ip',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterModelTable(
name='userprofile',
table='user_profiles',
),
]
|
[
"levushka14@gmail.com"
] |
levushka14@gmail.com
|
b58bc1f53f284b177d9a168c3cd8522e9ce5c134
|
30d02ec6dd309dced011d266ca40bace293fb23e
|
/20210315/swapping_nodes_in_a_linked_list.py
|
6c72e9f8fb965b8d2a1629cd4d2d42b7e2144379
|
[] |
no_license
|
jyeoniii/algorithm
|
b72f5e9f7fe63098c251bcc1585787ba39ca750c
|
7d80e27aec8fbac936911ee78a92c47b00daa3ba
|
refs/heads/master
| 2023-04-15T01:39:41.149528
| 2021-04-22T13:55:58
| 2021-04-22T13:55:58
| 316,533,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,747
|
py
|
# https://leetcode.com/explore/challenge/card/march-leetcoding-challenge-2021/589/week-2-march-8th-march-14th/3671/
from common.common_data import ListNode
class Solution:
def swapNodes(self, head: ListNode, k: int) -> ListNode:
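        # Single pass: remember the k-th node from the front while measuring
        # the length, then walk length - k steps to reach the k-th node from
        # the end and swap the two values.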
length = 0
node, node1 = head, None
while node:
length += 1
if length == k:
node1 = node
node = node.next
node = head
while length - k > 0:
node = node.next
length -= 1
node2 = node
node1.val, node2.val = node2.val, node1.val
return head
class Solution:
def swapNodes(self, head: ListNode, k: int) -> ListNode:
slow, fast = head, head
while k > 1:
fast = fast.next
k -= 1
node1 = fast
while fast.next:
slow, fast = slow.next, fast.next
node2 = slow
node1.val, node2.val = node2.val, node1.val
return head
# Swapping node itself, not just a value
class Solution:
def swapNodes(self, head: ListNode, k: int) -> ListNode:
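        # Record every node's predecessor so the nodes themselves (not just
        # their values) can be relinked; adjacent nodes need the special
        # next-pointer handling below.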
prev, nodes = {}, []
node = head
while node.next:
prev[node.next] = node
nodes.append(node)
node = node.next
nodes.append(node)
node1, node2 = (nodes[k - 1], nodes[len(nodes) - k]) if k - 1 <= len(nodes) - k else (nodes[len(nodes) - k], nodes[k - 1])
node1.next, node2.next = node2.next, node1.next if node1.next != node2 else node1
if node1 in prev and prev[node1] != node2:
prev[node1].next = node2
if node2 in prev and prev[node2] != node1:
prev[node2].next = node1
return head if node1 != head else node2
|
[
"jaykim9438@gmail.com"
] |
jaykim9438@gmail.com
|
745625739eb2a5d142639ae759a2c01bc73b0535
|
69da8d0f4d5d50b40019959a83dda09aa75f6dd3
|
/test/test_columndatatypegetter.py
|
19b486d3dce2c01e583932793a1daac2d5f241ce
|
[
"MIT"
] |
permissive
|
Peter-32/neatdata
|
62e8fbccd28257ec7e533eeec1cd5f579ae93247
|
8796ca9f027ad727440b2f11479ad5ab22aa8e09
|
refs/heads/master
| 2021-05-10T09:52:34.488575
| 2018-03-31T23:12:29
| 2018-03-31T23:12:29
| 118,937,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,276
|
py
|
import unittest
import pandas as pd
import numpy as np
from neatdata.neatdata import *
class TestColumnDataTypeGetter(unittest.TestCase):
def testColumnDataTypeGetter_Execute(self):
# Assemble
now = pd.datetime.now()
trainX = pd.DataFrame({'col1': [1,1,1,1,1,1,1],
'col2': ['a','a','a','b','a','b','b'],
'col3': ['a','a','a','b','a','b','b'],
'col4': ['a','a','a','b','a','b','b'],
'col5': ['a','a','a','b','a','b','b'],
'col6': [now,now,now,now,now,now,now],
'col7': [1,None,None,None,None,None,None],
'col8': ['a',None,None,None,None,None,None],
'col9': [now,None,None,None,None,None,None],
'col10': [np.nan,None,None,None,None,None,None],
'col11': [np.inf,None,None,None,None,None,None],
'col12': [-np.inf,1,None,None,None,None,None]})
indexColumns = ['col3','col4']
skipColumns = ['col5']
# Act
numberColumns, categoryColumns, datetimeColumns = ColumnDataTypeGetter().execute(trainX, indexColumns, skipColumns)
# Assert
self.assertTrue('col1' in numberColumns)
self.assertTrue('col2' in categoryColumns)
self.assertTrue('col3' not in numberColumns)
self.assertTrue('col3' not in categoryColumns)
self.assertTrue('col3' not in datetimeColumns)
self.assertTrue('col4' not in numberColumns)
self.assertTrue('col4' not in categoryColumns)
self.assertTrue('col4' not in datetimeColumns)
self.assertTrue('col5' not in numberColumns)
self.assertTrue('col5' not in categoryColumns)
self.assertTrue('col5' not in datetimeColumns)
self.assertTrue('col6' in datetimeColumns)
self.assertTrue('col7' in numberColumns)
self.assertTrue('col8' in categoryColumns)
self.assertTrue('col9' in datetimeColumns)
self.assertTrue('col10' in numberColumns)
self.assertTrue('col11' in numberColumns)
self.assertTrue('col12' in numberColumns)
|
[
"peter@impactradius.com"
] |
peter@impactradius.com
|
634bc45cb7e7a4fac71119db55fdd5b876c9f2c1
|
d2a2546165b3db6295a3f21972dda8ab9aab7846
|
/src/vehicles/witch_hill_dump.py
|
1c97fe495c9bea288903b47daf6afdd2b767a4a8
|
[] |
no_license
|
andythenorth/road-hog
|
bab12b133dd674f0e6d7ae87498675f8da96b982
|
1800d57d4ce904e7041f24646c393b37903d9466
|
refs/heads/main
| 2022-09-26T19:57:31.006800
| 2022-09-17T10:09:37
| 2022-09-17T10:09:37
| 214,848,659
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
from road_vehicle import DumpHauler, DieselRoadVehicle
consist = DumpHauler(id='witch_hill_dump',
base_numeric_id=500,
name='Witch Hill',
road_type='HAUL',
power=900,
speed=50, # dibbled up above RL for game balance
type_base_running_cost_points=30, # dibble running costs for game balance
vehicle_life=40,
intro_date=2007)
consist.add_unit(type=DieselRoadVehicle,
capacity=85, # much bigger is not much better here
vehicle_length=7,
effects=['EFFECT_SPRITE_AIRCRAFT_BREAKDOWN_SMOKE, -2, 1, 10', 'EFFECT_SPRITE_AIRCRAFT_BREAKDOWN_SMOKE, -2, -1, 10'])
|
[
"mail@andythenorth.co.uk"
] |
mail@andythenorth.co.uk
|
79ad6dc22c43fed47a393c0aff8caff6d7af35e4
|
f93ecb6738037629d6a7f81ccdc278a0e6051859
|
/backend/users/migrations/0002_auto_20210107_1422.py
|
0fcc43097aae6dd692bacb76114aaf83f58efae8
|
[] |
no_license
|
crowdbotics-apps/rntest-23713
|
5b30bda09e0023387c8f831655fc2c61178e54e9
|
1e08218f2b705815a63bba73a00590c439543e0d
|
refs/heads/master
| 2023-02-10T01:12:56.843111
| 2021-01-07T15:52:33
| 2021-01-07T15:52:33
| 327,614,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
# Generated by Django 2.2.17 on 2021-01-07 14:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0001_initial'),
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='group',
field=models.ManyToManyField(blank=True, related_name='user_group', to='course.Group'),
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
e0576df71c2522cdb2051d37f75b5bdada967a89
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/google/ads/googleads/v10/services/services/audience_service/transports/base.py
|
ff56552e7c1a9f7d86489c08a7f4d73273e0a941
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553
| 2022-06-17T20:41:17
| 2022-06-17T20:41:17
| 207,535,443
| 0
| 0
|
Apache-2.0
| 2019-09-10T10:58:55
| 2019-09-10T10:58:55
| null |
UTF-8
|
Python
| false
| false
| 5,929
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v10.services.types import audience_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-ads",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AudienceServiceTransport(abc.ABC):
"""Abstract transport class for AudienceService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
DEFAULT_HOST: str = "googleads.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id,
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(
service_account.Credentials, "with_always_use_jwt_access"
)
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.mutate_audiences: gapic_v1.method.wrap_method(
self.mutate_audiences,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def mutate_audiences(
self,
) -> Callable[
[audience_service.MutateAudiencesRequest],
Union[
audience_service.MutateAudiencesResponse,
Awaitable[audience_service.MutateAudiencesResponse],
],
]:
raise NotImplementedError()
__all__ = ("AudienceServiceTransport",)
|
[
"noreply@github.com"
] |
GerhardusM.noreply@github.com
|
e7d8a1682099ac7153d8ad000c1e50c2359043a1
|
c87397b08516625c178040e736cf87e61b227fa5
|
/inversioncount.py
|
73a7f1595a4acf8fc7b6152a498cb1a0cc991c25
|
[] |
no_license
|
sainihimanshu1999/HackerRank-Solution
|
26cb839aeb46c373643d5ad347a348103c1a147e
|
ad1e9e450474782b06add3c0c66108e3890d56ec
|
refs/heads/master
| 2022-12-24T07:16:48.576461
| 2020-09-11T15:16:34
| 2020-09-11T15:16:34
| 271,944,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the countInversions function below.
def countInversions(arr):
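    # Merge sort while counting: whenever an element from the right half is
    # placed before remaining left-half elements, each of those pairs is an
    # inversion.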
n = len(arr)
temp = [0]*n
return _mergeSort(arr,temp,0,n-1)
def _mergeSort(arr,temp,left,right):
count = 0
if left<right:
mid = (left+right)//2
count += _mergeSort(arr,temp,left,mid)
count += _mergeSort(arr,temp,mid+1,right)
count += merge(arr, temp, left, mid, right)
return count
def merge(arr, temp, left, mid, right):
    i = left
    j = mid + 1
    k = left
    count = 0
    while i <= mid and j <= right:
        if arr[i] <= arr[j]:
            temp[k] = arr[i]
            k += 1
            i += 1
        else:
            temp[k] = arr[j]
            count += (mid - i + 1)
            k += 1
            j += 1
    while i <= mid:
        temp[k] = arr[i]
        k += 1
        i += 1
    while j <= right:
        temp[k] = arr[j]
        k += 1
        j += 1
    for x in range(left, right + 1):
        arr[x] = temp[x]
    return count
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
arr = list(map(int, input().rstrip().split()))
result = countInversions(arr)
fptr.write(str(result) + '\n')
fptr.close()
|
[
"sainihimanshu.1999@gmail.com"
] |
sainihimanshu.1999@gmail.com
|
69cc6cdd46138f0cab03ad3c1137e1b4b13e2da9
|
b3d552675b36cb88a1388fcfc531e497ad7cbee9
|
/day2/filter_demo/filter_demo/views.py
|
096b4b76668d64379b9b12eb6c60b5e53333f08f
|
[] |
no_license
|
gaohj/1902_django
|
3cea1f0935fd983f25c6fd832b103ac5165a2e30
|
822af7b42120c6edc699bf97c800887ff84f5621
|
refs/heads/master
| 2022-12-11T10:02:50.233398
| 2019-11-26T08:33:38
| 2019-11-26T08:33:38
| 209,241,390
| 2
| 0
| null | 2022-12-08T07:28:24
| 2019-09-18T07:05:48
|
Python
|
UTF-8
|
Python
| false
| false
| 733
|
py
|
from django.shortcuts import render
from datetime import datetime
def greet(word):
return "hello world %s" % word
def index(request):
context = {
'greet':greet
}
return render(request,'index.html',context=context)
def add_view(request):
context = {
'value1': ['1','2','3','4'],
'value2':[5,'6',7]
}
return render(request, 'add.html', context=context)
def cut_view(request):
return render(request, 'cut.html')
def date_view(request):
context = {
'today':datetime.now()
}
return render(request, 'date.html',context=context)
def default_view(request):
context = {
'value':'haha'
}
return render(request, 'default.html',context=context)
|
[
"gaohj@126.com"
] |
gaohj@126.com
|
fac49fce5a9dca3eb4fba19fc3f0b99240d3b0d7
|
4bf344f5069a0048b7ee4fb49dc9a1126256f2ee
|
/fotalora_project/settings.py
|
48ec2c322f17cbee166e3cbc7f38246c16bcca96
|
[] |
no_license
|
rikicop/fotalora
|
1377881f866990ad96a90b3d3add04583c6a9175
|
4313bfce6423bcd6cdd79850e5c3975ae42b9de3
|
refs/heads/main
| 2023-04-12T02:22:05.124849
| 2021-05-15T23:07:32
| 2021-05-15T23:07:32
| 345,458,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,870
|
py
|
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'j1v0z5+s9%9_iaczr^8#!y%!xcmta93p3y_afjyor7w=^pf^%9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'website',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fotalora_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fotalora_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
MEDIA_URL = '/media/' #NEW
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') #NEW
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
|
[
"ruperto1@protonmail.com"
] |
ruperto1@protonmail.com
|
9f430d239d4708d95d76a6d4db2165837fcbc7e6
|
94bfb1346a9ce4cf6ca8bfeeb5194b7a467731a6
|
/aclark/db/migrations/0017_profile_twitter_username.py
|
b51796a7afd925ff83b1d5063aa2f2333263afca
|
[
"MIT"
] |
permissive
|
aclark4life/aclarknet-best-pro
|
4006cad37c2eec166a98a73e988b9b490a10e5cb
|
e256bfdd63ad4445bf0a75ef0b91f6e1fd2479ea
|
refs/heads/master
| 2023-03-01T09:10:04.041913
| 2020-12-01T18:40:07
| 2020-12-01T18:40:07
| 140,634,961
| 0
| 0
|
MIT
| 2021-02-10T01:57:38
| 2018-07-11T22:49:33
|
CSS
|
UTF-8
|
Python
| false
| false
| 398
|
py
|
# Generated by Django 2.1.9 on 2019-06-14 15:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("db", "0016_auto_20190614_1208")]
operations = [
migrations.AddField(
model_name="profile",
name="twitter_username",
field=models.CharField(blank=True, max_length=150, null=True),
)
]
|
[
"aclark@aclark.net"
] |
aclark@aclark.net
|
9da5dce08297733c59ac76e87bfeff418f8cd12d
|
3a9379132ef3ebb5ab9ae67a3baea146006381e6
|
/Pc_06_Beautiful Soup/pc_02_基本用法.py
|
328e49540567d5e34da008278433b7fa3527d567
|
[] |
no_license
|
ahaoao/PySpider
|
9c8280affcee27985105a09ea354ac77773d77a6
|
9c32bd56a8b198050f3b467fe233a3699de73ecf
|
refs/heads/master
| 2020-08-09T21:29:37.666947
| 2019-10-10T12:09:03
| 2019-10-10T12:09:03
| 214,172,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
from bs4 import BeautifulSoup
import requests
url = 'http://www.baidu.com/'
html = requests.get(url)
soup = BeautifulSoup(html.text, 'lxml')
# Call the prettify() method, which outputs the parsed document as a string with standard indentation
# BeautifulSoup corrects non-standard HTML automatically; that correction happens at initialization, not in prettify()
print(soup.prettify())
print(soup.script.string)
# soup.script.string prints the text content of the <script> node in the HTML
|
[
"18985242014@163.com"
] |
18985242014@163.com
|
565efacfb179f927cd8af26c61ae0c3ba3ef8487
|
81d0bfe1262008587ddf5ac12ae034d6922b9747
|
/.history/Smart/__init___20201119002524.py
|
cbb9b10af0ff19c067a3b2a07463887dced7e82c
|
[] |
no_license
|
elvinyeka/Smart-Mobile
|
525fffac14b8c460e85002bbf154bf54b4a341fe
|
a32f557306ae1bfe3ae01f5a8beef93727cfbc47
|
refs/heads/master
| 2023-06-09T09:52:18.446572
| 2021-07-06T11:35:34
| 2021-07-06T11:35:34
| 313,988,596
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SECRET_KEY'] = 'mysecretkey'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///smart.db'
db = SQLAlchemy(app)
from Smart.admin import routes
|
[
"elvinyeka@gmail.com"
] |
elvinyeka@gmail.com
|
5d3c950baf2810efddb7193b9a250d54b794cb01
|
6a6d8c0c8ddd6f5a1c03788f35320dd4b82314ea
|
/yamtbx/dataproc/cbf.py
|
f0cfd76d63abd9e9a3ee424e9184619b4ec59abc
|
[
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause",
"MIT"
] |
permissive
|
nsls-ii-mx/yamtbx
|
b817a131a8f6f515db99bc1743f81218997ac4ed
|
311cf5a20e27a035a9e89c2abcb3c7d5e3684d67
|
refs/heads/master
| 2021-01-11T12:05:38.166937
| 2017-01-24T16:26:44
| 2017-01-24T16:26:44
| 76,574,177
| 1
| 0
| null | 2016-12-15T16:00:06
| 2016-12-15T16:00:06
| null |
UTF-8
|
Python
| false
| false
| 3,840
|
py
|
"""
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
import os
import pycbf
import numpy
from cbflib_adaptbx import cbf_binary_adaptor, CBFWriteAdaptor
def load_cbf_as_numpy(filein, quiet=True):
assert os.path.isfile(filein)
if not quiet:
print "reading", filein, "as cbf"
h = pycbf.cbf_handle_struct()
h.read_file(filein, pycbf.MSG_DIGEST)
ndimfast, ndimslow = h.get_image_size_fs(0)
arr = numpy.fromstring(h.get_image_fs_as_string(0, 4, 1, ndimfast, ndimslow), dtype=numpy.int32)
return arr, ndimfast, ndimslow
# load_cbf_as_numpy()
def load_minicbf_as_numpy(filein, quiet=True): # This can also read XDS special cbf
assert os.path.isfile(filein)
if not quiet:
print "reading", filein, "as minicbf"
h = pycbf.cbf_handle_struct()
h.read_file(filein, pycbf.MSG_DIGEST)
h.require_category("array_data")
h.find_column("data")
compression, binary_id, elsize, elsigned, elunsigned, elements, minelement, maxelement, bo, ndimfast, ndimmid, ndimslow, padding = h.get_integerarrayparameters_wdims()
assert elsize == 4 or elsize == 8
assert elsigned == 1
assert ndimslow <= 1
arr = numpy.fromstring(h.get_integerarray_as_string(), dtype=numpy.int32 if elsize==4 else numpy.int64)
return arr, ndimfast, ndimmid
# load_minicbf_as_numpy()
def load_cbf_as_flex(filein): # This can also read XDS special cbf
M = cbf_binary_adaptor(filein)
data = M.uncompress_implementation("buffer_based").uncompress_data()
nslow, nfast = M.dim_slow(), M.dim_fast() # can be obtained after getting data
return data, nfast, nslow
# load_cbf_as_flex()
def load_xds_special(cbfin):
h = pycbf.cbf_handle_struct()
h.read_file(cbfin, pycbf.MSG_DIGEST)
h.require_category("array_data")
h.find_column("header_contents")
header = h.get_value()
M = cbf_binary_adaptor(cbfin)
data = M.uncompress_implementation("buffer_based").uncompress_data()
#print "slow, fast=", M.dim_slow(), M.dim_fast() # can be obtained after getting data
return header, data, M.dim_slow(), M.dim_fast()
# load_xds_special()
def save_numpy_data_as_cbf(data, size1, size2, title, cbfout, pilatus_header=None):
h = pycbf.cbf_handle_struct()
h.new_datablock(title)
h.require_category('array_data')
if pilatus_header is not None:
h.require_column('header_convention')
h.set_value('"PILATUS_1.2"')
h.require_column('header_contents')
h.set_value(pilatus_header)
h.require_category('array_data')
h.require_column('data')
elsigned = 1
if data.dtype in (numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64):
elsigned = 0
h.set_integerarray_wdims_fs(pycbf.CBF_BYTE_OFFSET, 1, data.tostring(), data.dtype.itemsize,
elsigned, len(data), "little_endian",
size1, size2, 1, 0)
h.write_file(cbfout, pycbf.CBF,
pycbf.MIME_HEADERS|pycbf.MSG_DIGEST|pycbf.PAD_4K, pycbf.ENC_NONE)
# save_numpy_data_as_cbf()
def save_flex_int_as_cbf(data, cbfout):
writer = CBFWriteAdaptor(cbfout)
writer.write_data(data)
# save_flex_int_as_cbf()
def get_pilatus_header(cbfin):
h = pycbf.cbf_handle_struct()
if cbfin.endswith(".bz2"):
# TODO to speed up, better only bunzip2 the first part of file..
import tempfile
import bz2
junk, tmpf = tempfile.mkstemp()
open(tmpf, "wb").write(bz2.BZ2File(cbfin).read())
h.read_file(tmpf, pycbf.MSG_DIGEST)
os.remove(tmpf)
else:
h.read_file(cbfin, pycbf.MSG_DIGEST)
h.require_category("array_data")
h.find_column("header_contents")
header = h.get_value()
return header
# get_pilatus_header()
|
[
"keitaroyam@users.noreply.github.com"
] |
keitaroyam@users.noreply.github.com
|
0ade154f6e8c21659fa6a191193b26eca83f5fed
|
d439cfe7ae0b01026ba1a821fa2ab853ccee9600
|
/bi_eval/negativePointer1.py
|
5164723a9738a64080ea3830beee70725268479a
|
[] |
no_license
|
luofang0212/synyi_test
|
6e16a7d52aab8aba39605e09df1a4115bd7af39e
|
386d1c7a72bd7eae8d16c64492cd0ca3bc6cd775
|
refs/heads/master
| 2023-08-29T17:30:23.307048
| 2021-11-03T08:10:01
| 2021-11-03T08:10:01
| 411,960,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,168
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from bi_eval.score_color import to_color
'''
Negative indicator - case 1
If (reference value / actual value) >= 150%,
then indicator score = full weighted score * 150%;
otherwise indicator score = (reference value / actual value) * full weighted score.
'''
# actual value
actual_value = 200.76
# reference value
reference_value = 32
# score
score = 100
# score weight
score_weight = 0.4
# full weighted score: the indicator's total score
full_score = score * score_weight
print("actual value: {0}  reference value: {1}  full weighted score (indicator score): {2}".format(actual_value, reference_value, full_score))
# floating range
domain_of_walker = 0
# indicator score
index_score = 0
result = reference_value / actual_value
print("ratio: {0}".format(result))
if (result >= 1.5):
    # indicator score = full weighted score * 150%
    index_score = full_score * 1.5
    print("1 indicator score = {0}".format(index_score))
    to_color(index_score, score, full_score)
else:
    # indicator score = (reference value / actual value) * full weighted score
    index_score = (reference_value / actual_value) * full_score
    print("2 indicator score = {0}".format(index_score))
    to_color(index_score, score, full_score)
|
[
"warm_homel@163.com"
] |
warm_homel@163.com
|
5787255ed323fe6e376304ba1c7501341403c07f
|
720668c26680d91db9e19cca9a9e348ec8f615ee
|
/app/snippets/serializers/users.py
|
a79228232b8b1679fbb934783f51016ae39dbbca
|
[] |
no_license
|
orca9s/drf-tutorial
|
a619f4669d5cf38d5450e19f27491ddaa0fbe4b3
|
4a214a51b94e7449ad16e061e3b799e215059955
|
refs/heads/master
| 2020-03-23T07:41:01.865456
| 2018-08-22T12:18:04
| 2018-08-22T12:18:04
| 141,285,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
from django.contrib.auth import get_user_model
from rest_framework import serializers
User = get_user_model()
__all__ = (
'UserListSerializer',
)
class UserBaseSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = (
'pk',
'username',
)
class UserListSerializer(UserBaseSerializer):
pass
|
[
"sang93423@gmail.com"
] |
sang93423@gmail.com
|
66f36110ab14cc56a6425df036cd827d82a1dd07
|
81c85850747f97ccc6ed36e3e0a859b99ef38fe8
|
/agesprot/settings.py
|
6630d034f511e8d362388af3c9ce8257d623e610
|
[] |
no_license
|
agesprot1/agesprot
|
f5047447a37ea8e92b4ffa2d72ae7814d0af8950
|
34c14a176bca5523999d27d5b9f695a6fac9df96
|
refs/heads/master
| 2021-01-20T22:11:18.686295
| 2016-08-23T22:38:57
| 2016-08-23T22:38:57
| 61,495,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,306
|
py
|
"""
Django settings for agesprot project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*@a%42&a%_4$uibdzen_^!f+gy)su!3m4anho4%vwpl1^n@b3c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mail_templated',
'agesprot.apps.base',
'agesprot.apps.users',
'agesprot.apps.project',
'agesprot.apps.activity',
'agesprot.apps.task',
'agesprot.apps.audit',
'agesprot.apps.notification',
'agesprot.apps.project.templatetags',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'agesprot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
AUTHENTICATION_BACKENDS = (
'agesprot.backends.EmailBackend',
'django.contrib.auth.backends.ModelBackend'
)
WSGI_APPLICATION = 'agesprot.wsgi.application'
# CONFIGURATION RESET PASSWORD
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'agesprot1@gmail.com'
EMAIL_HOST_PASSWORD = 'ftvoyvddltwpylyl'
EMAIL_PORT = 587
# END
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# LOCAL
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'agesprot_db',
'USER': 'root',
'PASSWORD': 'root',
'HOST': 'localhost',
'PORT': '',
}
}
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'd263b094gddtj',
'USER': 'lprhehqrpyfzrs',
'PASSWORD': 'BpKbhvVzip_5LaJP1kLlXVvyy7',
'HOST': 'ec2-50-19-219-148.compute-1.amazonaws.com',
'PORT': '5432'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGIN_REDIRECT_URL = '/project/my-list-project/'
LOGIN_URL = '/users/login/'
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'es'
TIME_ZONE = 'America/Bogota'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = 'static'
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR,'agesprot/static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'agesprot/static'),
)
|
[
"alka65@hotmail.com"
] |
alka65@hotmail.com
|
ee7b9a746e26d3d7ca43bc6c61e95f16d6ebf222
|
e9c3e8f6ae05b0144237d01671f9a02404c43154
|
/miltiple_leds_blink.py
|
82014aa46d4c8b36a31d58522a17bd32d2b2ee2b
|
[] |
no_license
|
vtt-info/micropython-stm32-examples
|
b836fe8a54119fcfdd95046d4edae043a156b695
|
b6bbcb98a23b615914a015c7cbdedd550f5807ed
|
refs/heads/master
| 2022-07-27T17:38:31.202661
| 2020-05-13T10:57:20
| 2020-05-13T10:57:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
# File: miltiple_leds_blink.py
# Date: 2020-05-12
import utime as time
from machine import Pin, Timer
from micropython import const
import pyb
LED_ON = const(0)
LED_OFF = const(1)
pin_names = ['PB7', 'PB8', 'PB9']
leds = []
timers = []
def timer_cb(t):
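    # Work out which timer fired and toggle the matching LED; timer i runs at
    # (1 << i) Hz, so each LED blinks at a different rate.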
for i in range(len(leds)):
if t is timers[i]:
# toggle: read-modify-write
x = leds[i].value()
leds[i].value( not x )
break
for pin in pin_names:
leds.append( Pin(pin,mode=Pin.OUT_PP,value=LED_OFF) )
for i in range(len(leds)):
timers.append( Timer(-1, freq=(1<<i), callback=timer_cb) )
try:
while True:
pass
except KeyboardInterrupt:
pass
finally:
for led in leds:
led.value(LED_OFF)
for tim in timers:
tim.deinit()
print('Done')
|
[
"noreply@github.com"
] |
vtt-info.noreply@github.com
|
0546778d3f2fa010ce9c2c93f6bc71b9f51e646d
|
28dbe47aba287ed94ef7bba734203736bcc06249
|
/.history/dmac_20200622205638.py
|
77543d6f6b94646db2f428cbd9cca2864dcb43d5
|
[] |
no_license
|
ntung88/Trading_Algorithms
|
242fd816b19df95e02e9fcd8c5c91c862d2ede40
|
d96488b1754e3751f739d9c3f094a8f8dc54a0a9
|
refs/heads/master
| 2022-11-19T16:04:07.800344
| 2020-07-17T21:14:10
| 2020-07-17T21:14:10
| 276,239,640
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,030
|
py
|
import yfinance as yf
import numpy as np
import pandas as pd
from scipy.optimize import minimize
def clean_data(data):
incomplete_idxs = False
for col in data.columns:
incomplete_idxs |= np.isnan(data[col])
return data[~incomplete_idxs]
def calc_crossovers(sma, lma):
num_points = len(clean_data(lma))
#Currently using only closing prices
sma = sma['Close']
lma = lma['Close']
high = (sma > lma)[-num_points:]
crossovers = high.astype(int).diff()[1:]
return crossovers[crossovers != 0]
def profit(data, crossovers):
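    # Long-only P&L: buy on a golden cross (+1 in `crossovers`), sell on a
    # death cross (-1); positions already open at the start of the series or
    # still open at the end are handled explicitly below.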
if len(crossovers) == 0:
return 0
total = 0
if crossovers.iloc[0] == -1:
total += data.loc[crossovers.index[0]] - data.iloc[0]
for i in range(1,len(crossovers)):
left_bound = crossovers.index[i-1]
if crossovers.loc[left_bound] == 1:
right_bound = crossovers.index[i]
total += data.loc[right_bound] - data.loc[left_bound]
if crossovers.iloc[-1] == 1:
total += data.iloc[-1] - data.loc[crossovers.index[-1]]
return total
def optimize(data):
# short_period = cp.Variable(integer=True, nonneg=True)
# long_period = cp.Variable(integer=True, nonneg=True)
# constraints = [short_period >= 1, long_period >= short_period]
# obj = cp.Maximize(run_analysis(short_period, long_period, data))
# # Form and solve problem.
# prob = cp.Problem(obj, constraints)
# prob.solve() # Returns the optimal value.
# return (short_period.value, long_period.value, prob.value, prob.status)
    # A dict literal with repeated keys silently keeps only the last
    # 'type'/'fun' pair, so the original three constraints collapsed into one.
    # COBYLA supports inequality constraints only, and run_analysis already
    # rounds the periods, so no integrality constraint is needed.
    cons = [{'type': 'ineq', 'fun': lambda x: x[1] - x[0]},
            {'type': 'ineq', 'fun': lambda x: x[0] - 1}]
short_seeds = range(5, 200, 20)
long_seeds = range(20, 800, 25)
minimum = float('inf')
best_short = 0
best_long = 0
for short_seed in short_seeds:
for long_seed in long_seeds:
if long_seed > short_seed:
res = minimize(run_analysis, [short_seed, long_seed], args=data, method='COBYLA', constraints=cons, options={'rhobeg': 10.0})
if res.fun < minimum:
best_short = res.x[0]
best_long = res.x[1]
minimum = res.fun
return (best_short, best_long, minimum)
def run_analysis(periods, data):
short_period = int(round(periods[0]))
long_period = int(round(periods[1]))
sma = data.rolling(short_period).mean()
lma = data.rolling(long_period).mean()
crossovers = calc_crossovers(sma, lma)
result = -1 * profit(data['Close'], crossovers)
# print(short_period, long_period, result)
return result
def main():
tickers = 'SPY AAPL MRNA TSLA'
data = yf.download(tickers, period='max', group_by='ticker')
dirty = pd.DataFrame(data['TSLA'])
frame = clean_data(dirty)
print(optimize(frame))
if __name__ == "__main__":
main()
|
[
"nathantung@Nathans-MacBook-Pro.local"
] |
nathantung@Nathans-MacBook-Pro.local
|
860f69580f038a5ee95e9bb3716a5de3706cd5e9
|
22b93005b05aa4cbfa6287c42e07244b9bf83be9
|
/examples/evaluation/evaluate_on_binary_classifier.py
|
401e318d6e536120fd59d628d4797c48f373b8c1
|
[
"Apache-2.0"
] |
permissive
|
dbczumar/mlflow
|
63ede1f21966def17ded0da9c8e92a207b34b90d
|
e293a73b510c924cbca50b6337b6d6f9fd9f8f1b
|
refs/heads/master
| 2023-08-31T23:40:55.475707
| 2023-07-15T04:22:18
| 2023-07-15T04:22:18
| 138,797,518
| 1
| 3
|
Apache-2.0
| 2023-08-23T23:01:08
| 2018-06-26T21:51:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
import xgboost
import shap
import mlflow
from mlflow.models import infer_signature
from sklearn.model_selection import train_test_split
# Load the UCI Adult Dataset
X, y = shap.datasets.adult()
# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# Fit an XGBoost binary classifier on the training data split
model = xgboost.XGBClassifier().fit(X_train, y_train)
# Infer model signature
predictions = model.predict(X_train)
signature = infer_signature(X_train, predictions)
# Build the Evaluation Dataset from the test set
eval_data = X_test
eval_data["label"] = y_test
with mlflow.start_run() as run:
# Log the XGBoost binary classifier model to MLflow
mlflow.sklearn.log_model(model, "model", signature=signature)
model_uri = mlflow.get_artifact_uri("model")
# Evaluate the logged model
result = mlflow.evaluate(
model_uri,
eval_data,
targets="label",
model_type="classifier",
evaluators=["default"],
)
print(f"metrics:\n{result.metrics}")
print(f"artifacts:\n{result.artifacts}")
|
[
"noreply@github.com"
] |
dbczumar.noreply@github.com
|
86f7dca124eb48e1a49eadf8555a03606c97d20a
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_206/1234.py
|
642dcec803933335fa83ea5efbed092fd01d0f03
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
import sys
def compute_velocity(destination, horses, case):
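    # The optimal cruise speed is destination / (latest arrival time among all
    # horses), since we may never overtake the slowest horse ahead of us.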
horses.sort(reverse=True)
worst_time = (destination - horses[0][0]) / horses[0][1] # time = space / velocity
for horse in horses:
todo = destination - horse[0]
time = todo / horse[1]
if time > worst_time:
worst_time = time
    cruise_velocity = destination / worst_time
    return "Case #" + str(case) + ": " + str(cruise_velocity) + "\n"
def solve(input, output):
# Read input
with open(output, "w") as o:
with open(input, "r") as f:
f.readline() # Read number of examples
# Process examples
case = 1
while True:
line = f.readline()
if not line:
break
destination, num_horses = line.split(" ")
horses = []
for i in range(int(num_horses)):
row = f.readline()
pos, velocity = row.split(" ")
horses.append([int(pos), int(velocity)])
o.write(compute_velocity(int(destination), horses, case))
case += 1
if __name__ == '__main__':
solve(sys.argv[1], sys.argv[2])
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
8fcc98ef1e17399e656b2982f4f3df049be9a227
|
4fd7e936dd38213a74f19abd760cc2b5f2c9be3f
|
/119-guild-incorrectly-importing-flags-from-other-modules/test.py
|
ad3831b26d04ee89bf89cde77a645ab19b3aed9c
|
[
"Apache-2.0"
] |
permissive
|
guildai/issue-resolution
|
8eae7c74ffd71f018e62d7374ac173671e81c0be
|
7fc5f6dac9090c7a7838715e99cef2e8d9867729
|
refs/heads/master
| 2023-08-04T00:44:29.549711
| 2023-07-31T18:42:46
| 2023-07-31T18:43:44
| 200,896,019
| 0
| 3
| null | 2023-07-03T08:13:20
| 2019-08-06T17:29:37
|
Python
|
UTF-8
|
Python
| false
| false
| 231
|
py
|
import argparse
import submod # Unused but triggers the bug. See submod.py
p = argparse.ArgumentParser()
p.add_argument("--foo", default=123)
if __name__ == "__main__":
args = p.parse_args()
print("foo: %s" % args.foo)
|
[
"g@rre.tt"
] |
g@rre.tt
|
0fbb084607b6a8f4c9a5e8d59df82a86c66aefe8
|
5b58a332c6bea0688d196aabedfc8ccc49bdd134
|
/experiments/models_angles_10s/train.py
|
ed7fba7dc84e9805e65f5d67ac01dc274ec22035
|
[] |
no_license
|
ver228/classify_strains
|
5420c2b3ea8e93b6ba46900c385f52f664f1cbd7
|
dc61e7431410e25ab7c2da0acb6d090cc2ebaabb
|
refs/heads/master
| 2021-09-20T08:52:14.505868
| 2018-08-07T12:26:22
| 2018-08-07T12:26:22
| 108,448,619
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,703
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 10:43:03 2017
@author: ajaver
"""
import os
import sys
import time
import torch
from torch import nn
# Use abspath: on Linux __file__ may be a relative path, so resolve it first.
_BASEDIR = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.join(_BASEDIR, os.pardir, os.pardir, 'src')
sys.path.append(src_dir)
from classify.trainer import init_generator, Trainer
import models
def main(
model_name = 'resnet18',
dataset = 'SWDB',
data_file = None, #get defaults
is_reduced = True,
sample_size_seconds = 10,
sample_frequency_s = 0.04,
n_batch = 32,
transform_type = 'eigenworms_full',
is_normalized = False,
n_epochs = 200,
):
if sys.platform == 'linux':
log_dir_root = '/work/ajaver/classify_strains/results'
else:
log_dir_root = '/Users/ajaver/OneDrive - Imperial College London/classify_strains/logs/'
#flag to check if cuda is available
is_cuda = torch.cuda.is_available()
#add the parent directory to the log results
pdir = os.path.split(_BASEDIR)[-1]
log_dir_root = os.path.join(log_dir_root, pdir)
params = dict(
is_reduced = is_reduced,
dataset = dataset,
data_file = data_file,
sample_size_seconds = sample_size_seconds,
sample_frequency_s = sample_frequency_s,
n_batch = n_batch,
transform_type = transform_type,
is_normalized = is_normalized,
is_cuda = is_cuda
)
gen_details, train_generator, test_generator = init_generator(**params)
assert model_name in dir(models)
get_model_func = getattr(models, model_name)
model = get_model_func(train_generator)
log_dir = os.path.join(log_dir_root, '{}_{}_{}'.format(model_name, gen_details, time.strftime('%Y%m%d_%H%M%S')))
#show some data for debugging purposes
print(model)
print(test_generator.valid_strains)
print(log_dir)
#maybe i should include the criterion and optimizer as input parameters
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
if is_cuda:
print('This is CUDA!!!!')
        torch.backends.cudnn.benchmark = True  # useful when input dimensions are fixed
model = model.cuda()
criterion = criterion.cuda()
t = Trainer(model,
optimizer,
criterion,
train_generator,
test_generator,
n_epochs,
log_dir
)
t.fit()
if __name__ == '__main__':
import fire
fire.Fire(main)
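    # Example invocation (a sketch): python-fire maps CLI flags to main()'s
    # keyword arguments, e.g.
    #   python train.py --model_name=resnet18 --n_batch=64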
|
[
"ajaver@MRC-8791.local"
] |
ajaver@MRC-8791.local
|
df90a8b291201afd2ac6f43b22b4e233d8ae03ba
|
185f30795be9a8fec6539fe17753fb909e258e4c
|
/ljy_16并发编程/ljy_06守护进程.py
|
4bc8faf01e00183b143fd3a907f0a7d528374fa2
|
[] |
no_license
|
OPBrother/LearningPython
|
bd375430ce013abd9a4279f60e5f9457e965bdf7
|
9d264acb269a6191f7ec49abba25c98002f4fcd1
|
refs/heads/main
| 2023-03-31T06:47:43.071370
| 2021-04-12T07:09:16
| 2021-04-12T07:09:16
| 350,307,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
"""
守护进程:父进程活着子进程活着,父进程死亡子进程死亡,该子进程就是守护进程
"""
from multiprocessing import Process
import time
def task(name):
print("%s总管正在活着" % name)
time.sleep(3)
print("%s总管正在死亡" % name)
if __name__ == '__main__':
p = Process(target=task, args=("egon", ))
p.daemon = True # 将p设置成守护进程,这句一定要放在start前面,否则报错
p.start()
time.sleep(2)
print("皇帝寿终正寝")
|
[
"2276720277@qq.com"
] |
2276720277@qq.com
|
e3bf33f81a6bf24febb67c78fecdd9915a355ad3
|
b6d475893a3d5a83d17c4219eaa2c154d1f77ec6
|
/app/auth/views.py
|
af6e8155eb39d7642ebc5d16fafcbb90bec1f4ba
|
[
"MIT"
] |
permissive
|
MungaiKeren/Pitch-It
|
6de28bac0ef7392952bfe2e9df6ec40b2a4962a8
|
ae0d85ea9437da4aacadc297e9e0a20ae955debf
|
refs/heads/master
| 2020-07-25T10:09:52.369506
| 2019-10-02T06:46:20
| 2019-10-02T06:46:20
| 208,254,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
from flask import render_template,request,redirect, url_for, flash
from . import auth
from ..models import User
from .forms import RegistrationForm,LoginForm
from .. import db
from flask_login import login_user,logout_user,login_required
# from ..email import mail_message
@auth.route('/login',methods=['GET','POST'])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
user = User.query.filter_by(email = login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user,login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid username or password')
title = "Pitch-It Login"
return render_template('auth/login.html',login_form = login_form,title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,password = form.password.data)
db.session.add(user)
db.session.commit()
        #mail_message("We are glad to receive you at Pitch It","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
title = "New account"
return render_template('auth/register.html',registration_form = form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
|
[
"wambukeren@gmail.com"
] |
wambukeren@gmail.com
|
7a7f0c99c26ddd39486ccc9e7cac0ca8934dce27
|
e41651d8f9b5d260b800136672c70cb85c3b80ff
|
/Notification_System/temboo/Library/Facebook/Actions/General/Follows/ReadFollows.py
|
91b6517f195a642d85b17b6b03768d222d7f5d93
|
[] |
no_license
|
shriswissfed/GPS-tracking-system
|
43e667fe3d00aa8e65e86d50a4f776fcb06e8c5c
|
1c5e90a483386bd2e5c5f48f7c5b306cd5f17965
|
refs/heads/master
| 2020-05-23T03:06:46.484473
| 2018-10-03T08:50:00
| 2018-10-03T08:50:00
| 55,578,217
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,265
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# ReadFollows
# Retrieves one or more follow actions.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ReadFollows(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ReadFollows Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ReadFollows, self).__init__(temboo_session, '/Library/Facebook/Actions/General/Follows/ReadFollows')
def new_input_set(self):
return ReadFollowsInputSet()
def _make_result_set(self, result, path):
return ReadFollowsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ReadFollowsChoreographyExecution(session, exec_id, path)
class ReadFollowsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ReadFollows
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved from the final step of the OAuth process.)
"""
super(ReadFollowsInputSet, self)._set_input('AccessToken', value)
def set_ActionID(self, value):
"""
Set the value of the ActionID input for this Choreo. ((optional, string) The id of an action to retrieve. If an id is not provided, a list of all follow actions will be returned.)
"""
super(ReadFollowsInputSet, self)._set_input('ActionID', value)
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) A comma separated list of fields to return (i.e. id,name).)
"""
super(ReadFollowsInputSet, self)._set_input('Fields', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, integer) Used to page through results. Limits the number of records returned in the response.)
"""
super(ReadFollowsInputSet, self)._set_input('Limit', value)
def set_Offset(self, value):
"""
Set the value of the Offset input for this Choreo. ((optional, integer) Used to page through results. Returns results starting from the specified number.)
"""
super(ReadFollowsInputSet, self)._set_input('Offset', value)
def set_ProfileID(self, value):
"""
Set the value of the ProfileID input for this Choreo. ((optional, string) The id of the user's profile. Defaults to "me" indicating the authenticated user.)
"""
super(ReadFollowsInputSet, self)._set_input('ProfileID', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.)
"""
super(ReadFollowsInputSet, self)._set_input('ResponseFormat', value)
class ReadFollowsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ReadFollows Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_HasNext(self):
"""
Retrieve the value for the "HasNext" output from this Choreo execution. ((boolean) A boolean flag indicating that a next page exists.)
"""
return self._output.get('HasNext', None)
def get_HasPrevious(self):
"""
Retrieve the value for the "HasPrevious" output from this Choreo execution. ((boolean) A boolean flag indicating that a previous page exists.)
"""
return self._output.get('HasPrevious', None)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Facebook. Corresponds to the ResponseFormat input. Defaults to JSON.)
"""
return self._output.get('Response', None)
class ReadFollowsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ReadFollowsResultSet(response, path)
|
[
"shriswissfed@gmail.com"
] |
shriswissfed@gmail.com
|
3d592a84a2929f37cce6ce6273455084be52287f
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-Cocoa/PyObjCTest/test_nsdebug.py
|
a28db7783a27caf49e4e9f84f4f5836d944f21a2
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
import Foundation
from PyObjCTools.TestSupport import TestCase
class TestNSDebug(TestCase):
def testFunctions(self):
self.assertResultIsBOOL(Foundation.NSIsFreedObject)
Foundation.NSRecordAllocationEvent
Foundation.NSFrameAddress
Foundation.NSReturnAddress
Foundation.NSCountFrames
Foundation.NSRecordAllocationEvent
def testConstants(self):
self.assertEqual(Foundation.NSObjectAutoreleasedEvent, 3)
self.assertEqual(Foundation.NSObjectExtraRefIncrementedEvent, 4)
self.assertEqual(Foundation.NSObjectExtraRefDecrementedEvent, 5)
self.assertEqual(Foundation.NSObjectInternalRefIncrementedEvent, 6)
self.assertEqual(Foundation.NSObjectInternalRefDecrementedEvent, 7)
self.assertIsInstance(Foundation.NSDebugEnabled, bool)
self.assertIsInstance(Foundation.NSZombieEnabled, bool)
self.assertIsInstance(Foundation.NSDeallocateZombies, bool)
self.assertIsInstance(Foundation.NSHangOnUncaughtException, bool)
self.assertIsInstance(Foundation.NSKeepAllocationStatistics, bool)
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
82fe38e752875c45d8e077fbd4e2bd5dd55b4f04
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_middleweights.py
|
56d926d6a9b798ce9d565d446c1f67605078abf6
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
from xai.brain.wordbase.nouns._middleweight import _MIDDLEWEIGHT
# class header
class _MIDDLEWEIGHTS(_MIDDLEWEIGHT):
def __init__(self,):
_MIDDLEWEIGHT.__init__(self)
self.name = "MIDDLEWEIGHTS"
self.specie = 'nouns'
self.basic = "middleweight"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
2fba743f49ec0a3a286e945e1e913510556c9323
|
d7f2007f2f9d87b314f59027d591226152a8aa8b
|
/pcaps/dnsreduce.py
|
3451f328624af2ebbde081bac998cc094e0876a0
|
[] |
no_license
|
jwde/comp116-jdestories
|
ad824d44c54cabfe4546113dfac113338e329a1f
|
8b0756f13e79c73b8da9e345788017dbda6a6a70
|
refs/heads/master
| 2021-01-18T13:20:36.410966
| 2015-12-15T22:26:49
| 2015-12-15T22:26:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
import fileinput
import re
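# Pair each line containing an IPv4 address with the tail of the line just before it.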
lines = []
for line in fileinput.input():
lines.append(line)
for i in range(1, len(lines)):
    match = re.search(r'.* (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*', lines[i])
    if match:
        ip = match.group(1)
        print "\t".join([ip, lines[i - 1][18:]])
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
250f0548ad501d762e33ffc702fc874c85f97b85
|
fdec477002fb0c5f013faf369d2a1e782172a1d6
|
/COVID19/Vaccine/views.py
|
4ac499c5b50e3f84443b7b97adaadf63287c01d4
|
[] |
no_license
|
aimiranarzhigitova/API_projects
|
19fb416479e5a76dab760f38621e643e2db609cb
|
8256cc1bc8dc939453c61a39215e89dbd96fecb1
|
refs/heads/master
| 2023-05-16T08:52:51.209458
| 2021-06-06T09:44:53
| 2021-06-06T09:44:53
| 374,322,074
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,644
|
py
|
from collections import OrderedDict
from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.generics import ListAPIView, ListCreateAPIView, RetrieveUpdateDestroyAPIView
from rest_framework.filters import SearchFilter
from rest_framework.pagination import PageNumberPagination
from .serializers import CategorySerializer, BaseVaccineSerializer, CustomerSerializer, \
ReviewCreateSerializers, MadeInSerializers, StatisticsSerializer, VoiceSerializer
from .models import Category, Vaccine, Customer, Review, MadeIn, Voice, Statistics
class VaccinePagination(PageNumberPagination):
page_size = 5
page_size_query_param = 'page_size'
max_page_size = 10
def get_paginated_response(self, data):
return Response(OrderedDict([
('objects_count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('items', data)
]))
class MadeInListApiView(ListAPIView):
serializer_class = MadeInSerializers
queryset = MadeIn.objects.all()
class CategoryListApiView(ListCreateAPIView):
serializer_class = CategorySerializer
queryset = Category.objects.all()
class CategoryApiView(RetrieveUpdateDestroyAPIView):
serializer_class = CategorySerializer
queryset = Category.objects.all()
class VaccineListApiView(ListCreateAPIView):
serializer_class = BaseVaccineSerializer
pagination_class = VaccinePagination
queryset = Vaccine.objects.all()
filter_backends = [SearchFilter]
search_fields = ['price', 'title', 'ip']
class VaccineDetailApiView(RetrieveUpdateDestroyAPIView):
serializer_class = BaseVaccineSerializer
queryset = Vaccine.objects.all()
class CustomersListApiView(ListAPIView):
serializer_class = CustomerSerializer
queryset = Customer.objects.all()
class ReviewCreateView(ListAPIView):
queryset = Review.objects.all()
serializer_class = ReviewCreateSerializers
    def post(self, request):
        review = ReviewCreateSerializers(data=request.data)
        if review.is_valid():  # is_valid must be called; a bare method reference is always truthy
            review.save()
            return Response(review.data)
        return Response(review.errors, status=400)
class StatisticsListAPiView(ListAPIView):
queryset = Statistics.objects.all()
serializer_class = StatisticsSerializer
class SaveAudioListApiView(ListAPIView):
queryset = Review.objects.all()
serializer_class = ReviewCreateSerializers
    def post(self, request):
        audio_file = request.FILES.get('recorded_audio')
        my_obj = Voice()  # instantiate the model; assigning the class itself would break save()
        my_obj.voice_record = audio_file
        my_obj.save()
        return JsonResponse({
            'success': True,
        })
|
[
"aymira.narzhigitova@gmail.com"
] |
aymira.narzhigitova@gmail.com
|
9e7b09a6a4c2d1b2618300fb8c999147a2987994
|
700f9f9e319ebd26d2557d64ea3827808dfad2f5
|
/tests/fixtures/test_references_json/content_16_expected.py
|
44ae2665689e10ebe81b405d34d6a9fb9c33332b
|
[
"MIT"
] |
permissive
|
elifesciences/elife-tools
|
1b44e660e916a82ef8ff64dd5a6ee5506e517359
|
bc16e7dd5d6245077e39f8561b99c9acd510ddf7
|
refs/heads/develop
| 2023-03-06T08:37:47.424282
| 2023-02-20T20:40:49
| 2023-02-20T20:40:49
| 30,274,058
| 13
| 11
|
MIT
| 2023-02-20T20:40:50
| 2015-02-04T01:14:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,083
|
py
|
from collections import OrderedDict
expected = [
OrderedDict(
[
("type", "unknown"),
("id", u"bib11"),
("date", u"2006"),
(
"authors",
[
OrderedDict(
[
("type", "person"),
(
"name",
OrderedDict(
[
("preferred", u"Cutler DM"),
("index", u"Cutler, DM"),
]
),
),
]
),
OrderedDict(
[
("type", "person"),
(
"name",
OrderedDict(
[
("preferred", u"Deaton AS"),
("index", u"Deaton, AS"),
]
),
),
]
),
OrderedDict(
[
("type", "person"),
(
"name",
OrderedDict(
[
("preferred", u"Lleras-Muney A"),
("index", u"Lleras-Muney, A"),
]
),
),
]
),
],
),
("title", u"The determinants of mortality (No. w11963)"),
("details", u"Cambridge, National Bureau of Economic Research"),
]
)
]
|
[
"gnott@starglobal.ca"
] |
gnott@starglobal.ca
|
9beb538b5dcf7efc3b834e8a9a8cf283d7bb8f56
|
f3d7aad9fae3275f232cdfd6417f1c9c8a610cc1
|
/titlesFotos.py
|
33d0eeb575494eab64b8877cc711de14be6bc006
|
[] |
no_license
|
LKingJ23/Python_Flask_Headlines
|
7b4a8c0f675e7e92cab29e99737ef487239a276e
|
886ca997846dcc82ed31033468e35d9958b1061b
|
refs/heads/master
| 2020-03-17T12:21:07.545431
| 2018-05-15T23:56:58
| 2018-05-15T23:56:58
| 133,584,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
from lxml import etree
import urllib2
ns={"Atom" : "http://www.w3.org/2005/Atom"}
parser=etree.XMLParser()
tree=etree.parse(urllib2.urlopen('https://api.flickr.com/services/feeds/photos_public.gne?tags=sevilla'),parser)
for node in tree.xpath('//Atom:entry/Atom:title', namespaces=ns) :
print node.text
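# A Python 3 sketch (the script above is Python 2): swap urllib2 for
# urllib.request and use the print function, e.g.
#   from urllib.request import urlopen
#   tree = etree.parse(urlopen(url), parser)
#   print(node.text)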
|
[
"lkingj23@gmail.com"
] |
lkingj23@gmail.com
|
87ff49ec6b62039abc4b66959c36345ef52853ab
|
19bc4d44dc7303e23a6949b1bc7b98b65bcf80e9
|
/python/Hypothesis_Testing_with_Python/Experimental_Design/Sample_Size_Determination_with_Simulation/introduction.py
|
c3366c4c4a904b84707b4f9e2198df81eb5a06b7
|
[] |
no_license
|
henry1034/Challenge-Project-of-CodeCademy
|
c66190ff3a318e22f263fcf78344632773065c24
|
61ebe84696cec120393acca62b4fce4bdea0fb30
|
refs/heads/master
| 2023-07-04T01:04:16.978374
| 2021-07-29T17:27:56
| 2021-07-29T17:27:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
import pandas as pd
from scipy.stats import chi2_contingency
data = pd.read_csv("ab_data.csv")
print(data.head())
# calculate contingency table here
ab_contingency = pd.crosstab(data.Web_Version, data.Purchased)
print(ab_contingency)
# run your chi square test here
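# chi2_contingency returns (statistic, pvalue, dof, expected_freq); [1] selects the p-value.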
pval = chi2_contingency(ab_contingency)[1]
print(pval)
|
[
"noreply@github.com"
] |
henry1034.noreply@github.com
|
d4f4358369b0e45816e7faef51d89207e706197f
|
6ed86bcacca9d065251171a0b53498d630a3b340
|
/src/edrn/labcas/ui/views/_metadata.py
|
d2541bd601b050ff06a6233d1794ae1919ebb079
|
[] |
no_license
|
EDRN/edrn.labcas.ui
|
595f56b60a72632d1b816d2414a6887f0099e70a
|
54a5c947d1a93d73c13b33517e3f1fd0acaec3b5
|
refs/heads/master
| 2021-01-24T07:12:40.324545
| 2020-03-12T19:49:45
| 2020-03-12T19:49:45
| 38,981,139
| 1
| 0
| null | 2018-02-22T20:18:12
| 2015-07-12T22:55:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,148
|
py
|
# encoding: utf-8
from edrn.labcas.ui import PACKAGE_NAME
from edrn.labcas.ui.interfaces import IBackend
from edrn.labcas.ui.utils import (
LabCASWorkflow, re_python_rfc3986_URI_reference, LabCASCollection, createSchema, addIdentifiersForStringFields,
ID_NUMBER_HUNTER
)
from pyramid.httpexceptions import HTTPFound
from pyramid.view import view_config, view_defaults
from zope.component import getUtility
import deform, os, os.path, logging, uuid
# Logging
_logger = logging.getLogger(__name__)
# Metadata fields for NIST pipelines that generate dataset IDs
_nistMetadataFields = frozenset((u'LabNumber', u'ProtocolName', u'SampleId'))
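# Checked below via a subset test (<=): when all three fields are present, the dataset name/id is derived from them.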
@view_defaults(renderer=PACKAGE_NAME + ':templates/metadata.pt')
class MetadataView(object):
def __init__(self, request):
self.request = request
def _getDatasetDir(self, metadata, dir, collectionName):
u'''Create and return the path to the dataset directory.'''
if u'DatasetName' not in metadata:
raise ValueError(u'DatasetName is a required metadata')
datasetName = metadata[u'DatasetName'].replace(u' ', u'_')
collectionName = collectionName.replace(u' ', u'_')
datasetDir = os.path.join(dir, collectionName, datasetName)
if not os.path.isdir(datasetDir):
os.makedirs(datasetDir, 0775)
return datasetDir
@view_config(route_name='metadata', permission='upload')
def __call__(self):
backend = getUtility(IBackend)
workflowID = self.request.matchdict['workflowID']
wfInfo = backend.getWorkflowMgr().getWorkflowById(workflowID)
workflow = LabCASWorkflow(
wfInfo.get('id', u'unknown'),
wfInfo.get('name', u'unknown'),
wfInfo.get('conditions', []),
wfInfo.get('tasks', [])
)
form = deform.Form(createSchema(workflow, self.request), buttons=('submit',))
if 'submit' in self.request.params:
try:
metadataAppstruct = form.validate(self.request.POST.items())
# CA-1382 ugly kludge, CA-1540 reformat
if _nistMetadataFields <= frozenset(metadataAppstruct.keys()):
ln = metadataAppstruct[u'LabNumber']
pn = metadataAppstruct[u'ProtocolName']
si = metadataAppstruct[u'SampleId']
metadataAppstruct[u'DatasetName'] = metadataAppstruct[u'DatasetId'] = u'{}_{}_{}'.format(ln, pn, si)
elif u'DatasetName' in metadataAppstruct.keys():
metadataAppstruct[u'DatasetId'] = metadataAppstruct[u'DatasetName'].replace(u' ', u'_')
else:
metadataAppstruct[u'DatasetId'] = unicode(uuid.uuid4())
metadataAppstruct[u'DatasetName'] = metadataAppstruct[u'DatasetId']
addIdentifiersForStringFields(metadataAppstruct)
collectionName = workflow.collectionName
if not collectionName:
collectionName = metadataAppstruct[u'CollectionName']
                # _getDatasetDir already creates the directory when it is missing
                datasetDir = self._getDatasetDir(metadataAppstruct, backend.getStagingDirectory(), collectionName)
self.request.session['metadata'] = metadataAppstruct
self.request.session['metadataForm'] = form.render(metadataAppstruct, readonly=True)
self.request.session['datasetDir'] = datasetDir
self.request.session['workflow'] = workflow
return HTTPFound(self.request.url + u'/accept')
except deform.ValidationFailure as ex:
return {
u'message': u"Some required metadata don't make sense or are missing.",
u'form': ex.render(),
u'widgetResources': form.get_widget_resources(),
u'pageTitle': u'Upload Metadata'
}
return {
u'form': form.render(),
u'widgetResources': form.get_widget_resources(),
u'pageTitle': u'Upload Metadata'
}
|
[
"kelly@seankelly.biz"
] |
kelly@seankelly.biz
|
41d226f467c29b6749b5ff10392a729eccb01326
|
015efe8cf8e2740d76a8d0b378f1e75de182103a
|
/test/unitTestSuite.py
|
c3c6bfc137471219de330689fae11f42a4c88238
|
[
"MIT"
] |
permissive
|
Samakwa/PyGeodesy
|
d6dbe7f825ee1858cd58e677aae37fd6c60570ee
|
4a5b6ac584c12bafc243d08dfc18d872707126d0
|
refs/heads/master
| 2020-03-28T04:11:51.548133
| 2018-09-05T17:35:01
| 2018-09-05T17:35:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,680
|
py
|
# -*- coding: utf-8 -*-
# Module to run all PyGeodesy tests as python setup.py test
from glob import glob
from os.path import abspath, dirname, join
import sys
import unittest
_test_dir = dirname(abspath(__file__))
# prepend this test directory to sys.path so that 'base' can be imported
if _test_dir not in sys.path: # Python 3+ ModuleNotFoundError
sys.path.insert(0, _test_dir)
from base import runner
__all__ = ('TestSuite',)
__version__ = '18.08.21'
class TestSuite(unittest.TestCase):
'''Combine all test modules into a test suite/case
and run each test module as a separate test.
'''
_runs = 0 # pseudo global
def _run(self, test):
TestSuite._runs += 1 # pseudo global
x, _ = runner(join(_test_dir, test + '.py'))
self.assertEqual(x, 0)
def test_Bases(self):
self._run('testBases')
def test_Classes(self):
self._run('testClasses')
def test_Datum(self):
self._run('testDatum')
def test_Dms(self):
self._run('testDms')
def test_Elevations(self):
self._run('testElevations')
def test_Ellipsoidal(self):
self._run('testEllipsoidal')
def test_Fmath(self):
self._run('testFmath')
def test_Geohash(self):
self._run('testGeohash')
def test_GreatCircle(self):
self._run('testGreatCircle')
def test_LatLon(self):
self._run('testLatLon')
def test_Lcc(self):
self._run('testLcc')
def test_Mgrs(self):
self._run('testMgrs')
def test_Modules(self):
self._run('testModules')
def test_NavlabExamples(self):
self._run('testNavlabExamples')
def test_Osgr(self):
self._run('testOsgr')
def test_Points(self):
self._run('testPoints')
def test_Routes(self):
self._run('testRoutes')
def test_Simplify(self):
self._run('testSimplify')
def test_Spherical(self):
self._run('testSpherical')
def test_Utils(self):
self._run('testUtils')
def test_Utm(self):
self._run('testUtm')
def test_UtmTMcoords(self):
self._run('testUtmTMcoords')
def test_Vectorial(self):
self._run('testVectorial')
def test_WebMercator(self):
self._run('testWebMercator')
def test_Ztotal(self):
# final test to make sure all tests were run
t = len(glob(join(_test_dir, 'test[A-Z]*.py')))
self.assertEqual(TestSuite._runs, t)
# t = sum(1 for t in dir(TestSuite) if t.startswith('test_'))
# self.assertEqual(TestSuite._runs, t)
if __name__ == '__main__':
unittest.main(argv=sys.argv) # catchbreak=None, failfast=None, verbosity=2
|
[
"mrJean1@Gmail.com"
] |
mrJean1@Gmail.com
|
bb4072ebe6c3e4a99fb1a57b8d0f722c97f38521
|
3e276ce46afcdaf365fd62b45ceba19327535f14
|
/src/libs/github/request.py
|
183377038e90b8e73a10a612875176a72e106be5
|
[
"MIT"
] |
permissive
|
17Y9E81/QQ-GitHub-Bot
|
1ca28ccc4b1a2bbbbb24419271389599dcd8ceb4
|
35c20d28aafaedc1813c6213ede9f2f51e56d5a2
|
refs/heads/master
| 2023-07-13T12:26:33.201661
| 2021-08-25T09:17:20
| 2021-08-25T09:17:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,030
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Author : yanyongyu
@Date : 2021-03-09 17:34:53
@LastEditors : yanyongyu
@LastEditTime : 2021-06-15 22:14:45
@Description : None
@GitHub : https://github.com/yanyongyu
"""
__author__ = "yanyongyu"
import base64
import urllib.parse
from typing import Any, Optional
import httpx
class Requester:
def __init__(self, token_or_client_id: Optional[str],
client_secret: Optional[str], base_url: str, timeout: int,
user_agent: str, per_page: int, verify: bool):
if client_secret:
b64 = base64.b64encode(
f"{token_or_client_id}:{client_secret}".encode()).decode()
self._authorization: str = f"Basic {b64}"
elif token_or_client_id:
self._authorization: str = f"token {token_or_client_id}"
else:
self._authorization: str = ""
self._base_url = base_url
self._timeout = timeout
self._user_agent = user_agent
self._per_page = per_page
self._verify = verify
self._client: Optional[httpx.AsyncClient] = None
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
@property
def client(self) -> httpx.AsyncClient:
if not self._client:
headers = {
"User-Agent": self._user_agent,
"Authorization": self._authorization,
"Accept": "application/vnd.github.v3+json"
}
self._client = httpx.AsyncClient(headers=headers,
verify=self._verify,
timeout=self._timeout)
return self._client
async def request_json(self,
method: str,
url: str,
params: Optional[dict] = None,
headers: Optional[dict] = None,
json: Any = None):
return await self.request(method, url, params, headers, None, json)
async def request(self,
method: str,
url: str,
params: Optional[dict] = None,
headers: Optional[dict] = None,
data: Optional[dict] = None,
json: Any = None):
url = urllib.parse.urljoin(self._base_url, url)
response = await self.client.request(method,
url,
params=params,
headers=headers,
data=data,
json=json)
response.raise_for_status()
return response
async def close(self):
if self._client:
await self._client.aclose()
self._client = None
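# A minimal usage sketch (arguments follow the constructor signature above;
# the token value is a placeholder):
#   async with Requester(token, None, "https://api.github.com", 10,
#                        "my-agent", 30, True) as req:
#       resp = await req.request_json("GET", "/user")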
|
[
"yanyongyu_1@126.com"
] |
yanyongyu_1@126.com
|
2759d8c95ba6f43470bfd97c99a7dbf69b9fdb76
|
077de1b3c5b1e5531e96f999be95a63c02a0208a
|
/yabgp/config.py
|
6db8b9fa05bb91bc46dd9943570b1090f96d741a
|
[
"Apache-2.0"
] |
permissive
|
unixian97/yabgp
|
5171f3dfa9070fbf91bd2a34fad6d4f32b0b9534
|
1b6752376a43f3c2958ead0afbf3f33ec311ddbd
|
refs/heads/master
| 2021-01-24T18:12:31.133442
| 2017-02-10T08:10:14
| 2017-02-10T08:10:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,436
|
py
|
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" basic config """
import logging
import sys
import os
from oslo_config import cfg
CONF = cfg.CONF
CONF.register_cli_opts([
cfg.BoolOpt('standalone', default=True, help='The BGP Agent running mode'),
cfg.StrOpt('pid-file', default=None, help='pid file name')
])
msg_process_opts = [
cfg.BoolOpt('write_disk',
default=True,
help='Whether the BGP message is written to disk'),
cfg.StrOpt('write_dir',
default=os.path.join(os.environ['HOME'], 'data/bgp/'),
help='The BGP messages storage path'),
cfg.IntOpt('write_msg_max_size',
default=500,
help='The Max size of one BGP message file, the unit is MB'),
cfg.BoolOpt('write_keepalive',
default=False,
help='Whether write keepalive message to disk'),
cfg.StrOpt('format',
default='json',
choices=['json', 'list'],
               help='The output format of BGP messages.')
]
CONF.register_opts(msg_process_opts, group='message')
bgp_config_opts = [
    cfg.IntOpt('peer_start_interval',
               default=10,
               help='The interval to start each BGP peer'),
    cfg.BoolOpt('four_bytes_as',
                default=True,
                help='Whether 4-byte AS numbers are supported'),
    cfg.BoolOpt('route_refresh',
                default=True,
                help='Whether sending and receiving route refresh messages is supported'),
    cfg.BoolOpt('cisco_route_refresh',
                default=True,
                help='Whether sending and receiving Cisco route refresh messages is supported'),
    cfg.BoolOpt('enhanced_route_refresh',
                default=True,
                help='Whether enhanced route refresh is supported'),
    cfg.StrOpt('add_path',
               choices=['ipv4_send', 'ipv4_receive', 'ipv4_both'],
               help='BGP additional path feature and supported address family'),
    cfg.BoolOpt('graceful_restart',
                default=True,
                help='Whether graceful restart is supported'),
    cfg.BoolOpt('cisco_multi_session',
                default=True,
                help='Whether Cisco multi-session is supported'),
    cfg.DictOpt('running_config',
                default={},
                help='The running configuration for BGP'),
    cfg.StrOpt('config_file',
               help='BGP peers configuration file')
]
CONF.register_opts(bgp_config_opts, group='bgp')
bgp_peer_conf_cli_opts = [
cfg.IntOpt('remote_as',
help='The remote BGP peer AS number'),
cfg.IntOpt('local_as',
help='The Local BGP AS number'),
cfg.IPOpt('remote_addr',
help='The remote address of the peer'),
cfg.IPOpt('local_addr',
default='0.0.0.0',
help='The local address of the BGP'),
cfg.StrOpt('md5',
help='The MD5 string use to auth',
secret=True),
cfg.BoolOpt('rib',
default=False,
help='Whether maintain BGP rib table'),
cfg.StrOpt('tag',
choices=['SRC', 'DST', 'BOTH', 'MON'],
help='The agent role tag'
),
cfg.ListOpt('afi_safi',
default=['ipv4'],
help='The Global config for address family and sub address family')
]
CONF.register_cli_opts(bgp_peer_conf_cli_opts, group='bgp')
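# Example CLI usage (a sketch; the yabgpd entry point name is an assumption):
#   yabgpd --bgp-local_as 65001 --bgp-local_addr 10.0.0.1 \
#          --bgp-remote_as 65000 --bgp-remote_addr 10.0.0.2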
LOG = logging.getLogger(__name__)
def get_bgp_config():
"""
Get BGP running config
:return:
"""
# check bgp_conf_file
if CONF.bgp.config_file:
        LOG.info('Try to load BGP configuration from %s', CONF.bgp.config_file)
        # TODO: parsing an XML config file for multiple BGP peers is not
        # implemented yet, so report failure and exit.
        LOG.error('Failed to load BGP configuration')
        sys.exit()
else:
# check bgp configuration from CLI input
LOG.info('Try to load BGP configuration from CLI input')
if CONF.bgp.local_as and CONF.bgp.remote_as and CONF.bgp.local_addr and CONF.bgp.remote_addr:
CONF.bgp.running_config[CONF.bgp.remote_addr] = {
'remote_as': CONF.bgp.remote_as,
'remote_addr': CONF.bgp.remote_addr,
'local_as': CONF.bgp.local_as,
'local_addr': CONF.bgp.local_addr,
'md5': CONF.bgp.md5,
'afi_safi': CONF.bgp.afi_safi,
'capability': {
'local': {
'four_bytes_as': CONF.bgp.four_bytes_as,
'route_refresh': CONF.bgp.route_refresh,
'cisco_route_refresh': CONF.bgp.cisco_route_refresh,
'enhanced_route_refresh': CONF.bgp.enhanced_route_refresh,
'graceful_restart': CONF.bgp.graceful_restart,
'cisco_multi_session': CONF.bgp.cisco_multi_session,
'add_path': CONF.bgp.add_path},
'remote': {}
},
'tag': CONF.bgp.tag
}
LOG.info('Get BGP running configuration for peer %s', CONF.bgp.remote_addr)
for item in CONF.bgp.running_config[CONF.bgp.remote_addr]:
if item == 'capability':
LOG.info('capability local:')
for capa in CONF.bgp.running_config[CONF.bgp.remote_addr][item]['local']:
LOG.info('-- %s: %s' % (
capa,
CONF.bgp.running_config[CONF.bgp.remote_addr][item]['local'][capa]
))
continue
LOG.info("%s = %s", item, CONF.bgp.running_config[CONF.bgp.remote_addr][item])
return
else:
LOG.error('Please provide enough parameters!')
sys.exit()
|
[
"xiaoquwl@gmail.com"
] |
xiaoquwl@gmail.com
|
9debf401bbd7759e2274873f21dbb4f2e291d155
|
892a20e473b51538a1297842c05e3dddc13d55d7
|
/indigo_pl/toc.py
|
5c61df032672fce31ffa1e95f6bbdf97b5ac11ae
|
[] |
no_license
|
epforgpl/pl-indigo
|
1eaa5662ed287610bc80bec8c3b363a036ea6de7
|
2722f65c27572c935b838979defcd1b282499419
|
refs/heads/master
| 2021-06-27T17:06:10.208890
| 2019-05-02T15:32:31
| 2019-05-02T15:32:31
| 143,410,269
| 1
| 1
| null | 2019-05-02T15:32:33
| 2018-08-03T09:52:39
|
Python
|
UTF-8
|
Python
| false
| false
| 860
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from indigo.analysis.toc.base import TOCBuilderBase
from indigo.plugins import plugins
@plugins.register('toc')
class TOCBuilderPL(TOCBuilderBase):
locale = ('pl', 'pol', None)
toc_elements = ["article", "chapter", "conclusions", "coverpage", "division", "paragraph", "preamble", "preface", "section", "subdivision"]
toc_non_unique_components = ['chapter', 'subdivision', 'paragraph']
titles = {
'article': lambda t: 'Art. %s' % t.num + (' - %s' % t.heading if t.heading else ''),
'chapter': lambda t: 'Rozdział %s' % t.num + (' - %s' % t.heading if t.heading else ''),
'division': lambda t: 'Dział %s' % t.num + (' - %s' % t.heading if t.heading else ''),
'paragraph': lambda t: t.num,
'section': lambda t: '§ %s' % t.num,
}
|
[
"greg@kempe.net"
] |
greg@kempe.net
|