blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
092ccc2f819176198bb2b988a4e323d43bc51b42 | 20c6da49ec4028eba8249be8df6919fc4a920319 | /model_style_transfer/project/data_loader/ade_dataset.py | 45fb0584d1fd8c9c642df0f7b5670af4df434b7e | [] | no_license | SavaStevanovic/NetMLDemonstrator | 01819c73186666989b427429ae6a8422ab9bf164 | c1325b1e8706c019178be51455d670cee56e0819 | refs/heads/master | 2023-08-08T18:39:07.084442 | 2023-03-26T08:45:03 | 2023-03-26T08:45:03 | 252,441,080 | 0 | 0 | null | 2023-06-18T09:48:24 | 2020-04-02T11:52:20 | Python | UTF-8 | Python | false | false | 658 | py | from torch.utils.data import Dataset
from PIL import Image
import glob
import os
class ADEChallengeData2016(Dataset):
def __init__(self, mode, folder_path):
super(ADEChallengeData2016, self).__init__()
img_files = glob.glob(os.path.join(
'/Data/segmentation', folder_path, 'annotations', mode, '*.png'))
self.data = [(x.replace('.png', '.jpg').replace(
'annotations', 'images'), x) for x in img_files]
def __len__(self):
return len(self.data)
def __getitem__(self, index):
img_path, segm_path = self.data[index]
data = Image.open(img_path, mode='r')
return data
| [
"sava.stevanovic@pmf.edu.rs"
] | sava.stevanovic@pmf.edu.rs |
7ffc3b6f85cff22c0f89b8e8b676438dd3d048fe | 1488889766d328b856b8910d7f1a50b52b754774 | /firstsite/settings.py | ae74f052934497765e6907a52736af417b6336d6 | [] | no_license | vipul-rathod/firstsite | 1790cb3f67fb5b690c9cc24161ddb9b13d458718 | 84c09a3c66b58ff92b8b12d83c49c48d0d9bd82c | refs/heads/master | 2021-05-18T22:53:15.355755 | 2020-03-31T20:47:12 | 2020-03-31T20:47:12 | 251,464,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py | """
Django settings for firstsite project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't+q$0yid9(a5jz74d#iiq*7yvdzdh@p^g$sb(q^cv-(0%ul9=w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'firstsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'firstsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"vipsravs787@gmail.com"
] | vipsravs787@gmail.com |
cc4c287a1f334475b6b5e06eaabaf792d66e3b32 | 2018321e9f9a3edc1d92cf986df1826138fe6b50 | /pythonProject/button.py | d283c5fb028e73dba1f8517a1b8edb3a15eb47e9 | [] | no_license | thanyi/alien-invasion | 7a8ede01e65d8a58accd686bb7219ba58f7e5e0d | de96c3123a74c3009eca43edced6693d97b3c8f0 | refs/heads/master | 2023-02-11T10:20:33.110836 | 2021-01-02T11:24:44 | 2021-01-02T11:24:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,478 | py | import pygame.font
import pygame
class Button():
def __init__(self, msg, screen):
self.screen = screen
self.screen_rect = self.screen.get_rect()
#填充图片的大小
self.width , self.height = 150,50
#背景颜色、字体颜色和字体
self.button_color = (0,255,0)
self.text_color = (255,255,255)
self.font = pygame.font.SysFont(None,36)
self.rect = pygame.Rect(0,0,self.width,self.height)
self.rect.centerx = self.screen_rect.centerx
self.rect.centery = self.screen_rect.centery
self.re_rect = pygame.Rect(0, 0, self.width, self.height)
self.re_rect.centerx = self.screen_rect.centerx
self.re_rect.centery = self.screen_rect.centery
self.prep_msg(msg)
def prep_msg(self,msg):
'''将其变成图像'''
self.msg_image =self.font.render(msg, True, self.text_color,self.button_color)
self.msg_image_rect = self.msg_image.get_rect()
self.msg_image_rect.center = self.rect.center
def draw_button(self):
self.screen.fill(self.button_color,self.rect)
self.screen.blit(self.msg_image,self.msg_image_rect)
def draw_replay_button(self):
self.rect.centerx = self.msg_image_rect.x+20
self.rect.centery = self.msg_image_rect.y+210
self.screen.fill(self.button_color,self.rect)
self.screen.blit(self.msg_image,(self.msg_image_rect.x,self.msg_image_rect.y+200))
| [
"yiceyuan@126.com"
] | yiceyuan@126.com |
2b2a54641d5f56d801a5a0f1798713935087ef28 | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/simtbx/run_tests.py | 5c3244e65192c78f2e1b57410133b5e40024a0a5 | [
"BSD-3-Clause",
"BSD-3-Clause-LBNL"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 468 | py | from __future__ import absolute_import, division, print_function
from libtbx import test_utils
import libtbx.load_env
tst_list = (
"$D/nanoBragg/tst_nanoBragg_minimal.py",
"$D/nanoBragg/tst_nanoBragg_mosaic.py",
"$D/nanoBragg/tst_gaussian_mosaicity.py",
)
def run():
build_dir = libtbx.env.under_build("simtbx")
dist_dir = libtbx.env.dist_path("simtbx")
test_utils.run_tests(build_dir, dist_dir, tst_list)
if (__name__ == "__main__"):
run()
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com |
fb22d74665a1f8e43b4181a24e234e13bccf6c86 | 9ecd34e90be231dd368abd1e2b7543f45eca2ad2 | /node_modules/socket.io/node_modules/socket.io-client/node_modules/ws/build/config.gypi | d920cef5c455ffef4ea25c9e9fb44568c26761e5 | [
"MIT"
] | permissive | pruthvikar/timer | a9339ed0e0e532da366e5433bba33a50453be6cc | d59e65cff1861d93d54d9cea31114e29ce43fc81 | refs/heads/master | 2020-06-08T04:01:51.862856 | 2013-04-29T01:23:02 | 2013-04-29T01:23:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,854 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 42,
"host_arch": "x64",
"node_install_npm": "true",
"node_install_waf": "true",
"node_prefix": "out/dist-osx/usr/local",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"target_arch": "x64",
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/Users/pruthvikarreddy/.node-gyp/0.8.19",
"copy_dev_lib": "true",
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/bash",
"parseable": "",
"userignorefile": "/Users/pruthvikarreddy/.npmignore",
"cache_max": "null",
"init_author_email": "",
"sign_git_tag": "",
"ignore": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"coverage": "",
"json": "",
"pre": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/pruthvikarreddy/.npm-init.js",
"userconfig": "/Users/pruthvikarreddy/.npmrc",
"npaturl": "http://npat.npmjs.org/",
"node_version": "v0.8.19",
"user": "",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"username": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "null",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"yes": "",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/pruthvikarreddy/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "node/v0.8.19 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "0.0.0",
"umask": "18",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/s3/14_2l8xd5sg5xmz0lg1c9j880000gn/T/",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"Pruthvikar@gmail.com"
] | Pruthvikar@gmail.com |
854209911af300efb0ca7e652dd682ae077fa116 | 475110b2e65643eb6f1dcdaa5bd4ee8b74f7900d | /backbones/densenet.py | 3c278afca1f1509b21b1fcd34b8fd4ccdafe8ee3 | [] | no_license | lgzbryant/classification_project | 47308d10142c9d871fa8c9a9d45d4f26f5509dce | 93856ee9bd11d82d6c0ff7785a3ab435631f039a | refs/heads/master | 2020-06-17T07:26:09.304920 | 2019-07-08T16:04:17 | 2019-07-08T16:04:17 | 195,845,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,834 | py | import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from collections import OrderedDict
from .utils import load_state_dict_from_url
__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161']
model_urls = {
'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
}
def _bn_function_factory(norm, relu, conv):
def bn_function(*inputs):
concated_features = torch.cat(inputs, 1)
bottleneck_output = conv(relu(norm(concated_features)))
return bottleneck_output
return bn_function
class _DenseLayer(nn.Sequential):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, memory_efficient=False):
super(_DenseLayer, self).__init__()
self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
self.add_module('relu1', nn.ReLU(inplace=True)),
self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
growth_rate, kernel_size=1, stride=1,
bias=False)),
self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module('relu2', nn.ReLU(inplace=True)),
self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=1,
bias=False)),
self.drop_rate = drop_rate
self.memory_efficient = memory_efficient
def forward(self, *prev_features):
bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1)
if self.memory_efficient and any(prev_feature.requires_grad for prev_feature in prev_features):
bottleneck_output = cp.checkpoint(bn_function, *prev_features)
else:
bottleneck_output = bn_function(*prev_features)
new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate,
training=self.training)
return new_features
class _DenseBlock(nn.Module):
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, memory_efficient=False):
super(_DenseBlock, self).__init__()
for i in range(num_layers):
layer = _DenseLayer(
num_input_features + i * growth_rate,
growth_rate=growth_rate,
bn_size=bn_size,
drop_rate=drop_rate,
memory_efficient=memory_efficient,
)
self.add_module('denselayer%d' % (i + 1), layer)
def forward(self, init_features):
features = [init_features]
for name, layer in self.named_children():
new_features = layer(*features)
features.append(new_features)
return torch.cat(features, 1)
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
memory_efficient (bool) - set to True to use checkpointing. Much more memory efficient,
but slower. Default: *False*
"""
def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, memory_efficient=False):
super(DenseNet, self).__init__()
# First convolution
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2,
padding=3, bias=False)),
('norm0', nn.BatchNorm2d(num_init_features)),
('relu0', nn.ReLU(inplace=True)),
('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]))
# Each denseblock
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=bn_size,
growth_rate=growth_rate,
drop_rate=drop_rate,
memory_efficient=memory_efficient
)
self.features.add_module('denseblock%d' % (i + 1), block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = _Transition(num_input_features=num_features,
num_output_features=num_features // 2)
self.features.add_module('transition%d' % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module('norm5', nn.BatchNorm2d(num_features))
# Linear layer
# self.classifier = nn.Linear(num_features, num_classes)
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x):
features = self.features(x)
out = F.relu(features, inplace=True)
out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1)
# out = self.classifier(out)
return out
def _load_state_dict(model, model_url, progress):
# '.'s are no longer allowed in module names, but previous _DenseLayer
# has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
# They are also in the checkpoints in model_urls. This pattern is used
# to find such keys.
pattern = re.compile(
r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
state_dict = load_state_dict_from_url(model_url, progress=progress)
for key in list(state_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
model.load_state_dict(state_dict)
def _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress,
**kwargs):
model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
if pretrained:
_load_state_dict(model, model_urls[arch], progress)
return model
def densenet121(pretrained=False, progress=True, **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress,
**kwargs)
def densenet161(pretrained=False, progress=True, **kwargs):
r"""Densenet-161 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress,
**kwargs)
def densenet169(pretrained=False, progress=True, **kwargs):
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress,
**kwargs)
def densenet201(pretrained=False, progress=True, **kwargs):
r"""Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress, **kwargs) | [
"noreply@github.com"
] | lgzbryant.noreply@github.com |
80d457fe0e0df539d494873fa3d8e41ce774ae0b | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1/palemale/a.py | f78d73ef5adea50522114802f390513ce3e2cfff | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 810 | py | import os, sys
with open(sys.argv[1], 'r') as infile:
N = int(infile.readline().strip())
for x in xrange(1, N+1):
T = infile.readline().strip()
cases = set(list(T))
intT = int(T)
current = intT
count = 2
stablecount = 0
while len(cases) < 10:
current = count*intT
count += 1
cur_num = len(cases)
cases.update(list(str(current)))
if cur_num == len(cases):
stablecount += 1
else:
stablecount = 0
if stablecount > 100:
current = 'INSOMNIA'
break
if isinstance(current, int):
current = str(current)
print "Case #%s: %s" % (x, current) | [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
0f6f4d5d7aeebbacb367b43963db1842478c9ef1 | 96df532f6bebf067a302ed096ae1d5b47022073a | /test/test_parser_helper.py | fe6a05305c0c0208a934eec7302909698c339599 | [] | no_license | datamix-study/notification_bot | bb7907feaf4587d25214edfd4a1e4d21dd87f6aa | 109e38b9fff07dc97ab5ad35275ce1c8ed5264fb | refs/heads/master | 2020-08-11T23:49:58.094679 | 2020-03-07T14:40:51 | 2020-03-07T14:40:51 | 214,650,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145,738 | py | DATAMIX_INFORMATION_SOURCE = """
<!DOCTYPE html>
<html lang="ja"
itemscope
itemtype="http://schema.org/WebSite"
prefix="og: http://ogp.me/ns#" class="no-js">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="google-site-verification" content="R1OoJj7cg0JS9YC_7mCZQ3mzzA2Qe1gIn4_IJurT1X4" />
<link rel="shortcut icon" href="/favicon.ico">
<link rel="icon" type="image/png" sizes="32x32" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-16x16.png">
<link rel="apple-touch-icon" sizes="57x57" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-57x57.png">
<link rel="apple-touch-icon" sizes="60x60" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-60x60.png">
<link rel="apple-touch-icon" sizes="72x72" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-72x72.png">
<link rel="apple-touch-icon" sizes="76x76" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-76x76.png">
<link rel="apple-touch-icon" sizes="114x114" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-114x114.png">
<link rel="apple-touch-icon" sizes="120x120" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-120x120.png">
<link rel="apple-touch-icon" sizes="144x144" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-144x144.png">
<link rel="apple-touch-icon" sizes="152x152" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-152x152.png">
<link rel="apple-touch-icon" sizes="180x180" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-180x180.png">
<link rel="icon" type="image/png" sizes="192x192" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/android-icon-192x192.png">
<meta name="msapplication-TileColor" content="#ffffff">
<meta name="msapplication-TileImage" content="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/ms-icon-144x144.png">
<link href="https://fonts.googleapis.com/css?family=Roboto:400,900" rel="stylesheet">
<link rel="stylesheet" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/style.css">
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/modernizr-custom.js"></script>
<title>ニュース | データサイエンティストを目指すならデータミックス</title>
<!-- Facebook Pixel Code -->
<script>
!function(f,b,e,v,n,t,s){if(f.fbq)return;n=f.fbq=function(){n.callMethod?
n.callMethod.apply(n,arguments):n.queue.push(arguments)};if(!f._fbq)f._fbq=n;
n.push=n;n.loaded=!0;n.version='2.0';n.queue=[];t=b.createElement(e);t.async=!0;
t.src=v;s=b.getElementsByTagName(e)[0];s.parentNode.insertBefore(t,s)}(window,
document,'script','https://connect.facebook.net/en_US/fbevents.js');
fbq('init', '760992884080078'); // Insert your pixel ID here.
fbq('track', 'PageView');
</script>
<noscript><img height="1" width="1" style="display:none"
src="https://www.facebook.com/tr?id=760992884080078&ev=PageView&noscript=1"
/></noscript>
<!-- DO NOT MODIFY -->
<!-- End Facebook Pixel Code -->
<!-- All in One SEO Pack 2.4.3 by Michael Torbert of Semper Fi Web Design[2736,2767] -->
<meta name="description" content="ニュースのページ。未経験から6ヶ月間で データサイエンティストを目指す社会人のためのデータサイエンティスト 育成専門の教育プログラム。IoT・ビッグデータ時代に必須のビジネス知識、統計学、機械学習、人工知能、データベース、プログラミング、SQLとBIツールのスキル獲得は株式会社データミックス。" />
<link rel='next' href='https://datamix.co.jp/news/page/2/' />
<link rel="canonical" href="https://datamix.co.jp/news/" />
<script type="text/javascript" >
window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};ga.l=+new Date;
ga('create', 'UA-99319144-1', 'auto');
// Plugins
ga('send', 'pageview');
</script>
<script async src="https://www.google-analytics.com/analytics.js"></script>
<!-- /all in one seo pack -->
<link rel='dns-prefetch' href='//s0.wp.com' />
<link rel='dns-prefetch' href='//secure.gravatar.com' />
<link rel='dns-prefetch' href='//s.w.org' />
<link rel="alternate" type="application/rss+xml" title="データサイエンティストを目指すならデータミックス » ニュース フィード" href="https://datamix.co.jp/news/feed/" />
<script type="text/javascript">
window._wpemojiSettings = {"baseUrl":"https:\/\/s.w.org\/images\/core\/emoji\/2.4\/72x72\/","ext":".png","svgUrl":"https:\/\/s.w.org\/images\/core\/emoji\/2.4\/svg\/","svgExt":".svg","source":{"concatemoji":"https:\/\/datamix.co.jp\/test\/wp-includes\/js\/wp-emoji-release.min.js?ver=4.9.3"}};
!function(a,b,c){function d(a,b){var c=String.fromCharCode;l.clearRect(0,0,k.width,k.height),l.fillText(c.apply(this,a),0,0);var d=k.toDataURL();l.clearRect(0,0,k.width,k.height),l.fillText(c.apply(this,b),0,0);var e=k.toDataURL();return d===e}function e(a){var b;if(!l||!l.fillText)return!1;switch(l.textBaseline="top",l.font="600 32px Arial",a){case"flag":return!(b=d([55356,56826,55356,56819],[55356,56826,8203,55356,56819]))&&(b=d([55356,57332,56128,56423,56128,56418,56128,56421,56128,56430,56128,56423,56128,56447],[55356,57332,8203,56128,56423,8203,56128,56418,8203,56128,56421,8203,56128,56430,8203,56128,56423,8203,56128,56447]),!b);case"emoji":return b=d([55357,56692,8205,9792,65039],[55357,56692,8203,9792,65039]),!b}return!1}function f(a){var c=b.createElement("script");c.src=a,c.defer=c.type="text/javascript",b.getElementsByTagName("head")[0].appendChild(c)}var g,h,i,j,k=b.createElement("canvas"),l=k.getContext&&k.getContext("2d");for(j=Array("flag","emoji"),c.supports={everything:!0,everythingExceptFlag:!0},i=0;i<j.length;i++)c.supports[j[i]]=e(j[i]),c.supports.everything=c.supports.everything&&c.supports[j[i]],"flag"!==j[i]&&(c.supports.everythingExceptFlag=c.supports.everythingExceptFlag&&c.supports[j[i]]);c.supports.everythingExceptFlag=c.supports.everythingExceptFlag&&!c.supports.flag,c.DOMReady=!1,c.readyCallback=function(){c.DOMReady=!0},c.supports.everything||(h=function(){c.readyCallback()},b.addEventListener?(b.addEventListener("DOMContentLoaded",h,!1),a.addEventListener("load",h,!1)):(a.attachEvent("onload",h),b.attachEvent("onreadystatechange",function(){"complete"===b.readyState&&c.readyCallback()})),g=c.source||{},g.concatemoji?f(g.concatemoji):g.wpemoji&&g.twemoji&&(f(g.twemoji),f(g.wpemoji)))}(window,document,window._wpemojiSettings);
</script>
<style type="text/css">
img.wp-smiley,
img.emoji {
display: inline !important;
border: none !important;
box-shadow: none !important;
height: 1em !important;
width: 1em !important;
margin: 0 .07em !important;
vertical-align: -0.1em !important;
background: none !important;
padding: 0 !important;
}
</style>
<link rel='stylesheet' id='jetpack_css-css' href='https://datamix.co.jp/test/wp-content/plugins/jetpack/css/jetpack.css?ver=6.8' type='text/css' media='all' />
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/jquery/jquery.js?ver=1.12.4'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/jquery/jquery-migrate.min.js?ver=1.4.1'></script>
<link rel='https://api.w.org/' href='https://datamix.co.jp/wp-json/' />
<link rel="EditURI" type="application/rsd+xml" title="RSD" href="https://datamix.co.jp/test/xmlrpc.php?rsd" />
<link rel="wlwmanifest" type="application/wlwmanifest+xml" href="https://datamix.co.jp/test/wp-includes/wlwmanifest.xml" />
<meta name="generator" content="WordPress 4.9.3" />
<!-- Markup (JSON-LD) structured in schema.org ver.4.1.8 START -->
<script type="application/ld+json">
{
"@context": "http://schema.org",
"@type": "BreadcrumbList",
"itemListElement": [
{
"@type": "ListItem",
"position": 1,
"item": {
"@id": "https://datamix.co.jp",
"name": "データサイエンティストを目指すならデータミックス"
}
},
{
"@type": "ListItem",
"position": 2,
"item": {
"@id": "https://datamix.co.jp/news/",
"name": "ニュース"
}
}
]
}
</script>
<!-- Markup (JSON-LD) structured in schema.org END -->
<link rel='dns-prefetch' href='//v0.wordpress.com'/>
<link rel='dns-prefetch' href='//i0.wp.com'/>
<link rel='dns-prefetch' href='//i1.wp.com'/>
<link rel='dns-prefetch' href='//i2.wp.com'/>
<style type='text/css'>img#wpstats{display:none}</style> <style type="text/css">
html:not( .jetpack-lazy-images-js-enabled ) .jetpack-lazy-image {
display: none;
}
</style>
<script>
document.documentElement.classList.add(
'jetpack-lazy-images-js-enabled'
);
</script>
<style type="text/css" id="syntaxhighlighteranchor"></style>
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css">
<script src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>
//hljs.initHighlightingOnLoad();
$(function()
{
$( 'pre' ).each(function( i , block )
{
hljs.highlightBlock( block ) ;
} ) ;
} ) ;
</script>
<script>
//スムーズスクロール
jQuery(function(){
// #で始まるアンカーをクリックした場合に処理
jQuery('a[href^=#]').click(function() {
// スクロールの速度
var speed = 400; // ミリ秒
// アンカーの値取得
var href= jQuery(this).attr("href");
// 移動先を取得
var target = jQuery(href == "#" || href == "" ? 'html' : href);
// 移動先を数値で取得
var position = target.offset().top;
// スムーススクロール
jQuery('body,html').animate({scrollTop:position}, speed, 'swing');
return false;
});
});
</script>
<!-- Treasure Data -->
<script type="text/javascript">
!function(t,e){if(void 0===e[t]){e[t]=function(){e[t].clients.push(this),this._init=[Array.prototype.slice.call(arguments)]},e[t].clients=[];for(var r=function(t){return function(){return this["_"+t]=this["_"+t]||[],this["_"+t].push(Array.prototype.slice.call(arguments)),this}},s=["blockEvents","unblockEvents","setSignedMode","setAnonymousMode","resetUUID","addRecord","fetchGlobalID","set","trackEvent","trackPageview","trackClicks","ready","fetchUserSegments"],n=0;n<s.length;n++){var c=s[n];e[t].prototype[c]=r(c)}var o=document.createElement("script");o.type="text/javascript",o.async=!0,o.src=("https:"===document.location.protocol?"https:":"http:")+"//cdn.treasuredata.com/sdk/2.1/td.min.js";var a=document.getElementsByTagName("script")[0];a.parentNode.insertBefore(o,a)}}("Treasure",this);
</script>
</head>
<body class="archive post-type-archive post-type-archive-news">
<header>
<div class="cf">
<div class="logo __desktop __other">
<a href="https://datamix.co.jp/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a>
</div>
<nav id="Navigation" class="a-topnav transition_quick">
<a class="home_menu hidden_dt" href="/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a>
<div class="menu-main-container"><ul id="menu-main" class="cf"><li id="menu-item-1024" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-has-children menu-item-1024"><a href="#">スクール<span>SCHOOL</span></a>
<ul class="sub-menu">
<li id="menu-item-2345" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-2345"><a href="https://datamix.co.jp/data-scientist/">データサイエンティスト育成コース</a></li>
<li id="menu-item-2282" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-2282"><a href="https://datamix.co.jp/introductory-data-scientist-course/">データサイエンティスト準備ステップ</a></li>
</ul>
</li>
<li id="menu-item-1368" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1368"><a href="https://datamix.co.jp/for-employer/">人材紹介<span>For Employer</span></a></li>
<li id="menu-item-1005" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1005"><a href="https://datamix.co.jp/for-company/">法人向けサービス<span>For Company</span></a></li>
<li id="menu-item-1026" class="menu-item menu-item-type-custom menu-item-object-custom current-menu-ancestor current-menu-parent menu-item-has-children menu-item-1026"><a href="#">ニュース<span>NEWS</span></a>
<ul class="sub-menu">
<li id="menu-item-1027" class="menu-item menu-item-type-custom menu-item-object-custom current-menu-item menu-item-1027"><a href="/news">ニュース</a></li>
<li id="menu-item-1028" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1028"><a href="https://datamix.co.jp/blog/">ブログ</a></li>
<li id="menu-item-1317" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-1317"><a href="/event">イベント</a></li>
</ul>
</li>
<li id="menu-item-1029" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1029"><a href="https://datamix.co.jp/recruit/">採用情報<span>recruit</span></a></li>
<li id="menu-item-1030" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1030"><a href="https://datamix.co.jp/company/">会社概要<span>about us</span></a></li>
</ul></div>
</nav>
<!-- <div class="btn __other __desktop">
<div class="button_hvr yellow"><a href="https://datamix.co.jp/form-seminor/" class="hvr-shutter-out-horizontal"><span class="icon-plane"><b>説明会に申し込む</b></span></a></div> </div>
-->
<div id="MenuIcon" class="menu_button __mobile">
<div>
<div>
<span></span>
<span></span>
<span></span>
<span></span>
</div>
</div>
<span class="roboto uppercase">menu</span>
</div><!--2-->
<!--<div class="information transition_quick __mobile">
<div class="cta twin">
</div>
</div>-->
</div>
</header>
<div id="wrapper">
<link rel="stylesheet" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/css/news.css">
<div class="top_label">
<div>
<h1>ニュース</h1>
<h3 class="roboto uppercase">NEWS</h3>
</div>
</div>
<div class="content_section">
<p class="breadcrumb"><a href="https://datamix.co.jp/">HOME</a> > <a href="https://datamix.co.jp/news/">ニュース</a></p>
</div>
<div class="blogs content_section cf">
<div class="news-area">
<h4 class="page_sub_title blue">お知らせ</h4>
<h3 class="page_main_title blue roboto uppercase"><b>Information</b></h3>
<div class="inner">
<ul>
<li><h4><span class="date">2019.10.09</span><a href="https://datamix.co.jp/news/20191009/">2019年10月12日(土)の休講及び無料説明会中止のお知らせ</a></h4></li>
<li><h4><span class="date">2019.10.07</span><a href="https://datamix.co.jp/news/3551/">オデッセイコミュニケーションズ主催「第17回 オデッセイ ユニバーシティ」にて弊社代表が講演いたします</a></h4></li>
<li><h4><span class="date">2019.10.02</span><a href="https://datamix.co.jp/news/20191002/">DX時代の企業の人財戦略セミナー「AIを活用した組織分析、データサイエンティスト育成の実践」を株式会社ネクストエデュケーションシンク様と共催いたします</a></h4></li>
<li><h4><span class="date">2019.09.19</span><a href="https://datamix.co.jp/news/20190919/">「第8回日本HRチャレンジ大賞」においてイノベーション賞を受賞しました</a></h4></li>
<li><h4><span class="date">2019.09.10</span><a href="https://datamix.co.jp/news/20190910/">「ネクスト・ザ・ファースト46 – 次代を担う市場の開拓者-」に掲載されました</a></h4></li>
<li><h4><span class="date">2019.08.30</span><a href="https://datamix.co.jp/news/20190830/">【プレスリリース】 gacco® (ガッコ) セレクト有料講座に「データサイエンス スキル育成プログラム」を開講</a></h4></li>
<li><h4><span class="date">2019.04.05</span><a href="https://datamix.co.jp/news/20190409/">【プレスリリース】ゴールデンウィーク中でデータ分析スキルを身につけるデータサイエンス研修を提供 ― 短期間でデータ分析、機械学習の基礎知識を習得 ―</a></h4></li>
<li><h4><span class="date">2019.04.03</span><a href="https://datamix.co.jp/news/20190403/">【プレスリリース】「データサイエンティスト育成コース パートタイムプログラム」の開講を増設</a></h4></li>
<li><h4><span class="date">2019.03.12</span><a href="https://datamix.co.jp/news/20190312/">【プレスリリース】データミックスがSpeeeと業務提携を実施 ノウハウを活かした独自のビジネストランスレーター育成研修制度を提供</a></h4></li>
<li><h4><span class="date">2019.02.26</span><a href="https://datamix.co.jp/news/20190226/">【プレスリリース】国内のデータサイエンティスト育成スクールにおいて初の取組みとなる リアルな企業データを活用したデータ分析PoC『OpenPoC』の提供を開始</a></h4></li>
</ul>
</div>
<div class="pagination cf"><div></div></div>
</div>
</div>
<div class="media-area">
<div class="wrap">
<h4 class="page_sub_title blue">メディア掲載</h4>
<h3 class="page_main_title blue roboto uppercase"><b>Media</b></h3>
<div class="inner">
<ul class="clearfix">
<li class="clearfix">
<div class="image"><img src="" alt=""></div>
<h4><span class="date">2019.03.12</span><a href="https://datamix.co.jp/news/20190320/">【メディア掲載】フリーランスエンジニアNoteに弊社代表 堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/1fd90f4f32d790f77dfd67c38d07918d.png?resize=140%2C31&ssl=1" alt=""></div>
<h4><span class="date">2018.02.15</span><a href="https://datamix.co.jp/news/%e3%80%90%e3%83%a1%e3%83%87%e3%82%a3%e3%82%a2%e6%8e%b2%e8%bc%89%e3%80%91%e6%97%a5%e5%88%8a%e5%b7%a5%e6%a5%ad%e6%96%b0%e8%81%9e%e9%9b%bb%e5%ad%90%e7%89%88%e3%81%ab%e5%bc%8a%e7%a4%be%e5%a0%85%e7%94%b0/">【メディア掲載】日刊工業新聞電子版に弊社堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/49eb521bb9c5b8089267706a57a64b7d.png?resize=140%2C100&ssl=1" alt=""></div>
<h4><span class="date">2018.02.10</span><a href="https://datamix.co.jp/news/%e3%80%90%e3%83%a1%e3%83%87%e3%82%a3%e3%82%a2%e6%8e%b2%e8%bc%89%e3%80%91%e3%83%9e%e3%82%a4%e3%83%8a%e3%83%93%e3%83%8b%e3%83%a5%e3%83%bc%e3%82%b9%e3%81%ab%e5%bc%8a%e7%a4%be%e4%bb%a3%e8%a1%a8-%e5%a0%85/">【メディア掲載】マイナビニュースに弊社代表 堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2018/01/diamondlogo.png?resize=140%2C70&ssl=1" alt=""></div>
<h4><span class="date">2018.01.25</span><a href="https://datamix.co.jp/news/diamond_online/">【メディア掲載】Diamond onlineに弊社代表 堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2017/12/hbol-logo.png?resize=140%2C80&ssl=1" alt=""></div>
<h4><span class="date">2017.12.28</span><a href="https://datamix.co.jp/news/%e3%80%90%e3%83%a1%e3%83%87%e3%82%a3%e3%82%a2%e6%8e%b2%e8%bc%89%e3%80%91%e3%83%8f%e3%83%bc%e3%83%90%e3%83%bc%e3%83%bb%e3%83%93%e3%82%b8%e3%83%8d%e3%82%b9%e3%83%bb%e3%82%aa%e3%83%b3%e3%83%a9%e3%82%a4/">【メディア掲載】ハーバー・ビジネス・オンラインに代表堅田のインタビュー記事が掲載されました。</a></h4>
</li>
</ul>
</div>
<div class="content_section page-template-news">
<div class="single button_wrap">
<div class="button_hvr yellow"><a href="/media" class="hvr-shutter-out-horizontal"><span class="icon-box3"><b>もっと見る</b></span></a></div>
</div>
</div>
</div>
</div>
<!-- end 20170926 kikuzawa -->
<section class="InViewSection home-companies published-area section relative cf" style="padding: 0 0 40px;">
<div class="content_section">
<h2>
<span>掲載メディア</span></h2>
<div class="published-list module-companies relative cf">
<div>
<div>
<img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2017/11/miniicon_ogpnikkei.png?fit=140%2C86&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2017/12/logo_NBD.png?fit=140%2C36&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2018/01/diamondlogo.png?fit=140%2C60&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2019/01/55172d3b380fdad2390fca2e86970c30.jpg?fit=140%2C34&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2019/01/9d2a3a1b6d85acbb34fdcaa2e7dfd677.jpg?fit=140%2C53&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2019/01/36392e404db2e0e87cf7e0f11adc0bc0.jpg?fit=140%2C36&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/1fd90f4f32d790f77dfd67c38d07918d.png?fit=140%2C17&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/49eb521bb9c5b8089267706a57a64b7d.png?fit=140%2C28&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2017/12/hbol-logo.png?fit=140%2C48&ssl=1" alt="">
</div>
</div>
</div>
</div>
</section>
<footer>
<div>
<div class="cf">
<div class="logo">
<a href="/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a><br>
<small class="copyright">©2017 DataMix</small>
</div>
<div class="ft_r">
<nav class="a-footer">
<div class="menu-ftm_new-container"><ul id="menu-ftm_new" class="cf"><li id="menu-item-1039" class="menu-item menu-item-type-custom menu-item-object-custom current-menu-item menu-item-1039"><a href="/news">ニュース</a></li>
<li id="menu-item-980" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-980"><a href="https://datamix.co.jp/blog/">ブログ</a></li>
</ul></div> </nav>
<nav class="a-footer">
<div class="menu-footer-container"><ul id="menu-footer" class="cf"><li id="menu-item-54" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-54"><a href="https://datamix.co.jp/company/">会社概要</a></li>
<li id="menu-item-53" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-53"><a href="https://datamix.co.jp/terms-of-service/">利用規約</a></li>
<li id="menu-item-52" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-52"><a href="https://datamix.co.jp/privacy-policy/">個人情報保護方針</a></li>
<li id="menu-item-51" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-51"><a href="https://datamix.co.jp/act-on-specified-commercial-transaction/">特定商取引法に基づく表記</a></li>
<li id="menu-item-146" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-146"><a href="https://datamix.co.jp/form/">お問い合わせ</a></li>
</ul></div> </nav>
</div>
<p class="ft_robo"><img src="https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/lobo_01.png" alt=""></p>
</div>
</div>
</footer>
</div>
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/main.js"></script>
<!--script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script-->
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/jquery.bgswitcher.js"></script>
<!--<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/tab.js"></script>-->
<script>
$(function() {
//クリックしたときのファンクションをまとめて指定
$('.tab_target .tab_wrap').css('display','none');
$('.tab_target > .tab_wrap:first').css('display','block');
$('.tab_target2 .tab_wrap').css('display','none');
$('.tab_target2 > .tab_wrap:first').css('display','block');
$('.tab_target3 .tab_wrap').css('display','none');
$('.tab_target3 > .tab_wrap:first').css('display','block');
$('.tab_target4 .tab_wrap').css('display','none');
$('.tab_target4 > .tab_wrap:first').css('display','block');
$('.tab_target5 .tab_wrap').css('display','none');
$('.tab_target5 > .tab_wrap:first').css('display','block');
$('.tab li:first').addClass('select');
$('.tab li').click(function() {
//.index()を使いクリックされたタブが何番目かを調べ、
//indexという変数に代入します。
var index = $('.tab li').index(this);
//コンテンツを一度すべて非表示にし、
$('.tab_target .tab_wrap').css('display','none');
$('.tab_target2 .tab_wrap').css('display','none');
$('.tab_target3 .tab_wrap').css('display','none');
$('.tab_target4 .tab_wrap').css('display','none');
$('.tab_target5 .tab_wrap').css('display','none');
//クリックされたタブと同じ順番のコンテンツを表示します。
$('.tab_target .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target2 .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target3 .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target4 .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target5 .tab_wrap').eq(index).fadeIn("slow");
//一度タブについているクラスselectを消し、
$('.tab li').removeClass('select');
//クリックされたタブのみにクラスselectをつけます。
$(this).addClass('select');
});
});
</script>
<script type="text/javascript">
jQuery(function($) {
$('.slider').bgSwitcher({
images: ['https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_01.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_02.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_03.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_04.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_05.jpg'],
interval: 5000,
effect: "fade"
});
});
$(function() {
$ ('.instructors_sub li').hover(function(){
var _self = $(this);
var id = _self.data('thumbnail');
$('.instructors_main li.on').removeClass('on');
$('.instructors_main li[data-card="' + id + '"]').addClass('on');
});
$ ('.instructors_sub2 li').hover(function(){
var _self = $(this);
var id = _self.data('thumbnail');
$('.instructors_main2 li.on').removeClass('on');
$('.instructors_main2 li[data-card="' + id + '"]').addClass('on');
});
var rWidth = $('.round-box').outerWidth();
$('.round-box').css('height', rWidth);
var rWidth2 = $('.roundsec.round3 .round-box').outerWidth();
$('.roundsec.round3 .round-box').css('height', rWidth2);
});
</script>
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/jquery.tile.js"></script>
<script>
$(window).on('load resize', function(){
var windowWidth = $(window).width();
$(".blog-area ul li").tile();
var windowSm = 750;
if (windowWidth > 750) {
$(".home-button ul li").tile();
$(".module-stepbox .module-stepbox-total_5 .module-stepbox-step").tile(5);
}
$(window).on("scroll", function() {
scrollHeight = $(document).height();
scrollPosition = $(window).height() + $(window).scrollTop();
footHeight = $("footer").innerHeight();
if (windowWidth < 750) {
if ( scrollHeight - scrollPosition <= footHeight ) {
$('.sp_fix_btn').slideUp();
} else {
$('.sp_fix_btn').slideDown();
}
}
});
});
</script>
<script>
$(window).on('load resize', function(){
var windowWidth = $(window).width();
if (windowWidth > 750) {
var mainheight = $('.page_main_banner').innerHeight()-70;
// var mainheight = $('.page_main_banner').height()+130;
var lavelheight = $('.top_label').height();
if(mainheight){
var hdheight = mainheight;
}
if(lavelheight){
var hdheight = lavelheight;
}
$('header').css('top', hdheight);
var triggerNode = $("header");
$(window).scroll(function () {
var value = $(this).scrollTop();
var triggerNodePosition = $(triggerNode).offset().top;
// 現在のスクロール位置が引き金要素の位置より下にあれば‥
if (value > hdheight) {
// なんらかの命令を実行
$('header').css({"top": 0,"position":"fixed"});
}else{
$('header').css({'top': hdheight, "position":"absolute"});
}
});
}
});
</script>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create','UA-99319144-1','auto');ga('send','pageview');
</script>
<div style="display:none">
</div>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/jetpack/_inc/build/photon/photon.min.js?ver=20130122'></script>
<script type='text/javascript' src='https://s0.wp.com/wp-content/js/devicepx-jetpack.js?ver=201941'></script>
<script type='text/javascript' src='https://secure.gravatar.com/js/gprofiles.js?ver=2019Octaa'></script>
<script type='text/javascript'>
/* <![CDATA[ */
var WPGroHo = {"my_hash":""};
/* ]]> */
</script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/jetpack/modules/wpgroho.js?ver=4.9.3'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/jetpack/_inc/build/lazy-images/js/lazy-images.min.js?ver=6.8'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/wp-embed.min.js?ver=4.9.3'></script>
<script type='text/javascript' src='https://stats.wp.com/e-201941.js' async='async' defer='defer'></script>
<script type='text/javascript'>
_stq = window._stq || [];
_stq.push([ 'view', {v:'ext',j:'1:6.8',blog:'155035170',post:'0',tz:'9',srv:'datamix.co.jp'} ]);
_stq.push([ 'clickTrackerInit', '155035170', '0' ]);
</script>
<!-- リマーケティング タグの Google コード -->
<!--------------------------------------------------
リマーケティング タグは、個人を特定できる情報と関連付けることも、デリケートなカテゴリに属するページに設置することも許可されません。タグの設定方法については、こちらのページをご覧ください。
http://google.com/ads/remarketingsetup
--------------------------------------------------->
<script type="text/javascript">
/* <![CDATA[ */
var google_conversion_id = 852033649;
var google_custom_params = window.google_tag_params;
var google_remarketing_only = true;
/* ]]> */
</script>
<script type="text/javascript" src="//www.googleadservices.com/pagead/conversion.js">
</script>
<noscript>
<div style="display:inline;">
<img height="1" width="1" style="border-style:none;" alt="" src="//googleads.g.doubleclick.net/pagead/viewthroughconversion/852033649/?guid=ON&script=0"/>
</div>
</noscript>
<!-- Yahoo Code for your Target List -->
<script type="text/javascript" language="javascript">
/* <![CDATA[ */
var yahoo_retargeting_id = '4F2M18WOUC';
var yahoo_retargeting_label = '';
var yahoo_retargeting_page_type = '';
var yahoo_retargeting_items = [{item_id: '', category_id: '', price: '', quantity: ''}];
/* ]]> */
</script>
<script type="text/javascript" language="javascript" src="//b92.yahoo.co.jp/js/s_retargeting.js"></script>
<script type="text/javascript">
//Configure an instance for your database
var td = new Treasure({
host: 'in.treasuredata.com',
writeKey: '9610/410a6a4e59ee7703f203ba2c070721601c08a013',
database: 'datamix_marketing',
startInSignedMode: true
});
// Enable cross-domain tracking
td.set('$global', 'td_global_id', 'td_global_id');
// Track pageview information into table
td.trackPageview('pageviews');
</script>
</body>
</html>
"""
DATAMIX_MEDIA_SOURCE = """
<!DOCTYPE html>
<html lang="ja"
itemscope
itemtype="http://schema.org/WebSite"
prefix="og: http://ogp.me/ns#" class="no-js">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="google-site-verification" content="R1OoJj7cg0JS9YC_7mCZQ3mzzA2Qe1gIn4_IJurT1X4" />
<link rel="shortcut icon" href="/favicon.ico">
<link rel="icon" type="image/png" sizes="32x32" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-16x16.png">
<link rel="apple-touch-icon" sizes="57x57" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-57x57.png">
<link rel="apple-touch-icon" sizes="60x60" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-60x60.png">
<link rel="apple-touch-icon" sizes="72x72" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-72x72.png">
<link rel="apple-touch-icon" sizes="76x76" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-76x76.png">
<link rel="apple-touch-icon" sizes="114x114" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-114x114.png">
<link rel="apple-touch-icon" sizes="120x120" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-120x120.png">
<link rel="apple-touch-icon" sizes="144x144" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-144x144.png">
<link rel="apple-touch-icon" sizes="152x152" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-152x152.png">
<link rel="apple-touch-icon" sizes="180x180" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-180x180.png">
<link rel="icon" type="image/png" sizes="192x192" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/android-icon-192x192.png">
<meta name="msapplication-TileColor" content="#ffffff">
<meta name="msapplication-TileImage" content="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/ms-icon-144x144.png">
<link href="https://fonts.googleapis.com/css?family=Roboto:400,900" rel="stylesheet">
<link rel="stylesheet" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/style.css">
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/modernizr-custom.js"></script>
<title>ニュース | データサイエンティストを目指すならデータミックス</title>
<!-- Facebook Pixel Code -->
<script>
!function(f,b,e,v,n,t,s){if(f.fbq)return;n=f.fbq=function(){n.callMethod?
n.callMethod.apply(n,arguments):n.queue.push(arguments)};if(!f._fbq)f._fbq=n;
n.push=n;n.loaded=!0;n.version='2.0';n.queue=[];t=b.createElement(e);t.async=!0;
t.src=v;s=b.getElementsByTagName(e)[0];s.parentNode.insertBefore(t,s)}(window,
document,'script','https://connect.facebook.net/en_US/fbevents.js');
fbq('init', '760992884080078'); // Insert your pixel ID here.
fbq('track', 'PageView');
</script>
<noscript><img height="1" width="1" style="display:none"
src="https://www.facebook.com/tr?id=760992884080078&ev=PageView&noscript=1"
/></noscript>
<!-- DO NOT MODIFY -->
<!-- End Facebook Pixel Code -->
<!-- All in One SEO Pack 2.4.3 by Michael Torbert of Semper Fi Web Design[2736,2767] -->
<meta name="description" content="ニュースのページ。未経験から6ヶ月間で データサイエンティストを目指す社会人のためのデータサイエンティスト 育成専門の教育プログラム。IoT・ビッグデータ時代に必須のビジネス知識、統計学、機械学習、人工知能、データベース、プログラミング、SQLとBIツールのスキル獲得は株式会社データミックス。" />
<link rel='next' href='https://datamix.co.jp/news/page/2/' />
<link rel="canonical" href="https://datamix.co.jp/news/" />
<script type="text/javascript" >
// Google Analytics command-queue stub (vendor snippet): ga() buffers calls until
// analytics.js (loaded async just below) arrives.
window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};ga.l=+new Date;
// NOTE(review): a second snippet near the footer also runs ga('create', 'UA-99319144-1', ...)
// and ga('send', 'pageview') — pageviews are likely counted twice; confirm and drop one.
ga('create', 'UA-99319144-1', 'auto');
// Plugins
ga('send', 'pageview');
</script>
<script async src="https://www.google-analytics.com/analytics.js"></script>
<!-- /all in one seo pack -->
<link rel='dns-prefetch' href='//s0.wp.com' />
<link rel='dns-prefetch' href='//secure.gravatar.com' />
<link rel='dns-prefetch' href='//s.w.org' />
<link rel="alternate" type="application/rss+xml" title="データサイエンティストを目指すならデータミックス » ニュース フィード" href="https://datamix.co.jp/news/feed/" />
<script type="text/javascript">
// WordPress emoji support (core-generated, minified — do not edit the logic):
// draws test glyphs on a canvas to detect native flag/emoji rendering, and loads
// the concatenated twemoji polyfill only when the browser lacks support.
window._wpemojiSettings = {"baseUrl":"https:\/\/s.w.org\/images\/core\/emoji\/2.4\/72x72\/","ext":".png","svgUrl":"https:\/\/s.w.org\/images\/core\/emoji\/2.4\/svg\/","svgExt":".svg","source":{"concatemoji":"https:\/\/datamix.co.jp\/test\/wp-includes\/js\/wp-emoji-release.min.js?ver=4.9.3"}};
!function(a,b,c){function d(a,b){var c=String.fromCharCode;l.clearRect(0,0,k.width,k.height),l.fillText(c.apply(this,a),0,0);var d=k.toDataURL();l.clearRect(0,0,k.width,k.height),l.fillText(c.apply(this,b),0,0);var e=k.toDataURL();return d===e}function e(a){var b;if(!l||!l.fillText)return!1;switch(l.textBaseline="top",l.font="600 32px Arial",a){case"flag":return!(b=d([55356,56826,55356,56819],[55356,56826,8203,55356,56819]))&&(b=d([55356,57332,56128,56423,56128,56418,56128,56421,56128,56430,56128,56423,56128,56447],[55356,57332,8203,56128,56423,8203,56128,56418,8203,56128,56421,8203,56128,56430,8203,56128,56423,8203,56128,56447]),!b);case"emoji":return b=d([55357,56692,8205,9792,65039],[55357,56692,8203,9792,65039]),!b}return!1}function f(a){var c=b.createElement("script");c.src=a,c.defer=c.type="text/javascript",b.getElementsByTagName("head")[0].appendChild(c)}var g,h,i,j,k=b.createElement("canvas"),l=k.getContext&&k.getContext("2d");for(j=Array("flag","emoji"),c.supports={everything:!0,everythingExceptFlag:!0},i=0;i<j.length;i++)c.supports[j[i]]=e(j[i]),c.supports.everything=c.supports.everything&&c.supports[j[i]],"flag"!==j[i]&&(c.supports.everythingExceptFlag=c.supports.everythingExceptFlag&&c.supports[j[i]]);c.supports.everythingExceptFlag=c.supports.everythingExceptFlag&&!c.supports.flag,c.DOMReady=!1,c.readyCallback=function(){c.DOMReady=!0},c.supports.everything||(h=function(){c.readyCallback()},b.addEventListener?(b.addEventListener("DOMContentLoaded",h,!1),a.addEventListener("load",h,!1)):(a.attachEvent("onload",h),b.attachEvent("onreadystatechange",function(){"complete"===b.readyState&&c.readyCallback()})),g=c.source||{},g.concatemoji?f(g.concatemoji):g.wpemoji&&g.twemoji&&(f(g.twemoji),f(g.wpemoji)))}(window,document,window._wpemojiSettings);
</script>
<style type="text/css">
/* WordPress emoji/smiley images: force them to render inline at text size
   (1em square) so they flow with the surrounding copy instead of as block images. */
img.wp-smiley,
img.emoji {
display: inline !important;
border: none !important;
box-shadow: none !important;
height: 1em !important;
width: 1em !important;
margin: 0 .07em !important;
vertical-align: -0.1em !important;
background: none !important;
padding: 0 !important;
}
</style>
<link rel='stylesheet' id='jetpack_css-css' href='https://datamix.co.jp/test/wp-content/plugins/jetpack/css/jetpack.css?ver=6.8' type='text/css' media='all' />
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/jquery/jquery.js?ver=1.12.4'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/jquery/jquery-migrate.min.js?ver=1.4.1'></script>
<link rel='https://api.w.org/' href='https://datamix.co.jp/wp-json/' />
<link rel="EditURI" type="application/rsd+xml" title="RSD" href="https://datamix.co.jp/test/xmlrpc.php?rsd" />
<link rel="wlwmanifest" type="application/wlwmanifest+xml" href="https://datamix.co.jp/test/wp-includes/wlwmanifest.xml" />
<meta name="generator" content="WordPress 4.9.3" />
<!-- Markup (JSON-LD) structured in schema.org ver.4.1.8 START -->
<script type="application/ld+json">
{
"@context": "http://schema.org",
"@type": "BreadcrumbList",
"itemListElement": [
{
"@type": "ListItem",
"position": 1,
"item": {
"@id": "https://datamix.co.jp",
"name": "データサイエンティストを目指すならデータミックス"
}
},
{
"@type": "ListItem",
"position": 2,
"item": {
"@id": "https://datamix.co.jp/news/",
"name": "ニュース"
}
}
]
}
</script>
<!-- Markup (JSON-LD) structured in schema.org END -->
<link rel='dns-prefetch' href='//v0.wordpress.com'/>
<link rel='dns-prefetch' href='//i0.wp.com'/>
<link rel='dns-prefetch' href='//i1.wp.com'/>
<link rel='dns-prefetch' href='//i2.wp.com'/>
<style type='text/css'>img#wpstats{display:none}</style> <style type="text/css">
/* Jetpack lazy-images: keep lazy <img>s hidden unless JavaScript is available —
   the inline script just below adds the enabling class to <html>. */
html:not( .jetpack-lazy-images-js-enabled ) .jetpack-lazy-image {
display: none;
}
</style>
<script>
// Jetpack lazy-images: mark JS as available so the CSS guard above stops
// hiding lazy-loaded images.
var rootElement = document.documentElement;
rootElement.classList.add('jetpack-lazy-images-js-enabled');
</script>
<style type="text/css" id="syntaxhighlighteranchor"></style>
<!-- NOTE(review): this loads jQuery 1.11.1 over the 1.12.4 copy (with jquery-migrate) already included above, replacing window.$/jQuery and discarding migrate shims — confirm whether this second load is intentional. -->
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css">
<script src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>
// Apply highlight.js syntax highlighting to every <pre> block once the DOM is ready.
$(function() {
    $('pre').each(function(idx, codeBlock) {
        hljs.highlightBlock(codeBlock);
    });
});
</script>
<script>
// Smooth-scroll for in-page anchor links.
jQuery(function(){
// FIX: the "#" must be quoted — a[href^=#] is an invalid CSS/Sizzle selector and
// throws "Syntax error, unrecognized expression" in the jQuery versions this page
// loads (1.12.4 / 1.11.1), which killed the whole handler.
jQuery('a[href^="#"]').click(function() {
// Scroll duration in milliseconds.
var speed = 400;
// Target fragment from the clicked link.
var href= jQuery(this).attr("href");
// "#" or an empty href means "scroll to the top of the page".
var target = jQuery(href == "#" || href == "" ? 'html' : href);
// FIX: guard against a missing anchor target — target.offset() would be
// undefined and .top would throw. Still cancel the default instant jump.
if (!target.length) {
return false;
}
// Animate both body and html for cross-browser scrolling.
var position = target.offset().top;
jQuery('body,html').animate({scrollTop:position}, speed, 'swing');
// Suppress the browser's default instant jump.
return false;
});
});
</script>
<!-- Treasure Data -->
<script type="text/javascript">
// Treasure Data JS SDK bootstrap (vendor-minified snippet — do not edit the logic):
// defines a stub "Treasure" client that queues method calls, then async-loads
// td.min.js, which replays the queued calls once ready.
!function(t,e){if(void 0===e[t]){e[t]=function(){e[t].clients.push(this),this._init=[Array.prototype.slice.call(arguments)]},e[t].clients=[];for(var r=function(t){return function(){return this["_"+t]=this["_"+t]||[],this["_"+t].push(Array.prototype.slice.call(arguments)),this}},s=["blockEvents","unblockEvents","setSignedMode","setAnonymousMode","resetUUID","addRecord","fetchGlobalID","set","trackEvent","trackPageview","trackClicks","ready","fetchUserSegments"],n=0;n<s.length;n++){var c=s[n];e[t].prototype[c]=r(c)}var o=document.createElement("script");o.type="text/javascript",o.async=!0,o.src=("https:"===document.location.protocol?"https:":"http:")+"//cdn.treasuredata.com/sdk/2.1/td.min.js";var a=document.getElementsByTagName("script")[0];a.parentNode.insertBefore(o,a)}}("Treasure",this);
</script>
</head>
<body class="archive post-type-archive post-type-archive-news">
<header>
<div class="cf">
<div class="logo __desktop __other">
<a href="https://datamix.co.jp/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a>
</div>
<nav id="Navigation" class="a-topnav transition_quick">
<a class="home_menu hidden_dt" href="/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a>
<div class="menu-main-container"><ul id="menu-main" class="cf"><li id="menu-item-1024" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-has-children menu-item-1024"><a href="#">スクール<span>SCHOOL</span></a>
<ul class="sub-menu">
<li id="menu-item-2345" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-2345"><a href="https://datamix.co.jp/data-scientist/">データサイエンティスト育成コース</a></li>
<li id="menu-item-2282" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-2282"><a href="https://datamix.co.jp/introductory-data-scientist-course/">データサイエンティスト準備ステップ</a></li>
</ul>
</li>
<li id="menu-item-1368" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1368"><a href="https://datamix.co.jp/for-employer/">人材紹介<span>For Employer</span></a></li>
<li id="menu-item-1005" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1005"><a href="https://datamix.co.jp/for-company/">法人向けサービス<span>For Company</span></a></li>
<li id="menu-item-1026" class="menu-item menu-item-type-custom menu-item-object-custom current-menu-ancestor current-menu-parent menu-item-has-children menu-item-1026"><a href="#">ニュース<span>NEWS</span></a>
<ul class="sub-menu">
<li id="menu-item-1027" class="menu-item menu-item-type-custom menu-item-object-custom current-menu-item menu-item-1027"><a href="/news">ニュース</a></li>
<li id="menu-item-1028" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1028"><a href="https://datamix.co.jp/blog/">ブログ</a></li>
<li id="menu-item-1317" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-1317"><a href="/event">イベント</a></li>
</ul>
</li>
<li id="menu-item-1029" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1029"><a href="https://datamix.co.jp/recruit/">採用情報<span>recruit</span></a></li>
<li id="menu-item-1030" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1030"><a href="https://datamix.co.jp/company/">会社概要<span>about us</span></a></li>
</ul></div>
</nav>
<!-- <div class="btn __other __desktop">
<div class="button_hvr yellow"><a href="https://datamix.co.jp/form-seminor/" class="hvr-shutter-out-horizontal"><span class="icon-plane"><b>説明会に申し込む</b></span></a></div> </div>
-->
<div id="MenuIcon" class="menu_button __mobile">
<div>
<div>
<span></span>
<span></span>
<span></span>
<span></span>
</div>
</div>
<span class="roboto uppercase">menu</span>
</div><!--2-->
<!--<div class="information transition_quick __mobile">
<div class="cta twin">
</div>
</div>-->
</div>
</header>
<div id="wrapper">
<link rel="stylesheet" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/css/news.css">
<div class="top_label">
<div>
<h1>ニュース</h1>
<h3 class="roboto uppercase">NEWS</h3>
</div>
</div>
<div class="content_section">
<p class="breadcrumb"><a href="https://datamix.co.jp/">HOME</a> > <a href="https://datamix.co.jp/news/">ニュース</a></p>
</div>
<div class="blogs content_section cf">
<div class="news-area">
<h4 class="page_sub_title blue">お知らせ</h4>
<h3 class="page_main_title blue roboto uppercase"><b>Information</b></h3>
<div class="inner">
<ul>
<li><h4><span class="date">2019.10.09</span><a href="https://datamix.co.jp/news/20191009/">2019年10月12日(土)の休講及び無料説明会中止のお知らせ</a></h4></li>
<li><h4><span class="date">2019.10.07</span><a href="https://datamix.co.jp/news/3551/">オデッセイコミュニケーションズ主催「第17回 オデッセイ ユニバーシティ」にて弊社代表が講演いたします</a></h4></li>
<li><h4><span class="date">2019.10.02</span><a href="https://datamix.co.jp/news/20191002/">DX時代の企業の人財戦略セミナー「AIを活用した組織分析、データサイエンティスト育成の実践」を株式会社ネクストエデュケーションシンク様と共催いたします</a></h4></li>
<li><h4><span class="date">2019.09.19</span><a href="https://datamix.co.jp/news/20190919/">「第8回日本HRチャレンジ大賞」においてイノベーション賞を受賞しました</a></h4></li>
<li><h4><span class="date">2019.09.10</span><a href="https://datamix.co.jp/news/20190910/">「ネクスト・ザ・ファースト46 – 次代を担う市場の開拓者-」に掲載されました</a></h4></li>
<li><h4><span class="date">2019.08.30</span><a href="https://datamix.co.jp/news/20190830/">【プレスリリース】 gacco® (ガッコ) セレクト有料講座に「データサイエンス スキル育成プログラム」を開講</a></h4></li>
<li><h4><span class="date">2019.04.05</span><a href="https://datamix.co.jp/news/20190409/">【プレスリリース】ゴールデンウィーク中でデータ分析スキルを身につけるデータサイエンス研修を提供 ― 短期間でデータ分析、機械学習の基礎知識を習得 ―</a></h4></li>
<li><h4><span class="date">2019.04.03</span><a href="https://datamix.co.jp/news/20190403/">【プレスリリース】「データサイエンティスト育成コース パートタイムプログラム」の開講を増設</a></h4></li>
<li><h4><span class="date">2019.03.12</span><a href="https://datamix.co.jp/news/20190312/">【プレスリリース】データミックスがSpeeeと業務提携を実施 ノウハウを活かした独自のビジネストランスレーター育成研修制度を提供</a></h4></li>
<li><h4><span class="date">2019.02.26</span><a href="https://datamix.co.jp/news/20190226/">【プレスリリース】国内のデータサイエンティスト育成スクールにおいて初の取組みとなる リアルな企業データを活用したデータ分析PoC『OpenPoC』の提供を開始</a></h4></li>
</ul>
</div>
<div class="pagination cf"><div></div></div>
</div>
</div>
<div class="media-area">
<div class="wrap">
<h4 class="page_sub_title blue">メディア掲載</h4>
<h3 class="page_main_title blue roboto uppercase"><b>Media</b></h3>
<div class="inner">
<ul class="clearfix">
<li class="clearfix">
<div class="image"><!-- NOTE(review): <img src="" alt=""> removed — an empty src makes browsers re-request the page URL and renders a broken image; restore once a real image URL is set --></div>
<h4><span class="date">2019.03.12</span><a href="https://datamix.co.jp/news/20190320/">【メディア掲載】フリーランスエンジニアNoteに弊社代表 堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/1fd90f4f32d790f77dfd67c38d07918d.png?resize=140%2C31&ssl=1" alt=""></div>
<h4><span class="date">2018.02.15</span><a href="https://datamix.co.jp/news/%e3%80%90%e3%83%a1%e3%83%87%e3%82%a3%e3%82%a2%e6%8e%b2%e8%bc%89%e3%80%91%e6%97%a5%e5%88%8a%e5%b7%a5%e6%a5%ad%e6%96%b0%e8%81%9e%e9%9b%bb%e5%ad%90%e7%89%88%e3%81%ab%e5%bc%8a%e7%a4%be%e5%a0%85%e7%94%b0/">【メディア掲載】日刊工業新聞電子版に弊社堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/49eb521bb9c5b8089267706a57a64b7d.png?resize=140%2C100&ssl=1" alt=""></div>
<h4><span class="date">2018.02.10</span><a href="https://datamix.co.jp/news/%e3%80%90%e3%83%a1%e3%83%87%e3%82%a3%e3%82%a2%e6%8e%b2%e8%bc%89%e3%80%91%e3%83%9e%e3%82%a4%e3%83%8a%e3%83%93%e3%83%8b%e3%83%a5%e3%83%bc%e3%82%b9%e3%81%ab%e5%bc%8a%e7%a4%be%e4%bb%a3%e8%a1%a8-%e5%a0%85/">【メディア掲載】マイナビニュースに弊社代表 堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2018/01/diamondlogo.png?resize=140%2C70&ssl=1" alt=""></div>
<h4><span class="date">2018.01.25</span><a href="https://datamix.co.jp/news/diamond_online/">【メディア掲載】Diamond onlineに弊社代表 堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2017/12/hbol-logo.png?resize=140%2C80&ssl=1" alt=""></div>
<h4><span class="date">2017.12.28</span><a href="https://datamix.co.jp/news/%e3%80%90%e3%83%a1%e3%83%87%e3%82%a3%e3%82%a2%e6%8e%b2%e8%bc%89%e3%80%91%e3%83%8f%e3%83%bc%e3%83%90%e3%83%bc%e3%83%bb%e3%83%93%e3%82%b8%e3%83%8d%e3%82%b9%e3%83%bb%e3%82%aa%e3%83%b3%e3%83%a9%e3%82%a4/">【メディア掲載】ハーバー・ビジネス・オンラインに代表堅田のインタビュー記事が掲載されました。</a></h4>
</li>
</ul>
</div>
<div class="content_section page-template-news">
<div class="single button_wrap">
<div class="button_hvr yellow"><a href="/media" class="hvr-shutter-out-horizontal"><span class="icon-box3"><b>もっと見る</b></span></a></div>
</div>
</div>
</div>
</div>
<!-- end 20170926 kikuzawa -->
<section class="InViewSection home-companies published-area section relative cf" style="padding: 0 0 40px;">
<div class="content_section">
<h2>
<span>掲載メディア</span></h2>
<div class="published-list module-companies relative cf">
<div>
<div>
<img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2017/11/miniicon_ogpnikkei.png?fit=140%2C86&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2017/12/logo_NBD.png?fit=140%2C36&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2018/01/diamondlogo.png?fit=140%2C60&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2019/01/55172d3b380fdad2390fca2e86970c30.jpg?fit=140%2C34&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2019/01/9d2a3a1b6d85acbb34fdcaa2e7dfd677.jpg?fit=140%2C53&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2019/01/36392e404db2e0e87cf7e0f11adc0bc0.jpg?fit=140%2C36&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/1fd90f4f32d790f77dfd67c38d07918d.png?fit=140%2C17&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/49eb521bb9c5b8089267706a57a64b7d.png?fit=140%2C28&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2017/12/hbol-logo.png?fit=140%2C48&ssl=1" alt="">
</div>
</div>
</div>
</div>
</section>
<footer>
<div>
<div class="cf">
<div class="logo">
<a href="/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a><br>
<small class="copyright">©2017 DataMix</small>
</div>
<div class="ft_r">
<nav class="a-footer">
<div class="menu-ftm_new-container"><ul id="menu-ftm_new" class="cf"><li id="menu-item-1039" class="menu-item menu-item-type-custom menu-item-object-custom current-menu-item menu-item-1039"><a href="/news">ニュース</a></li>
<li id="menu-item-980" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-980"><a href="https://datamix.co.jp/blog/">ブログ</a></li>
</ul></div> </nav>
<nav class="a-footer">
<div class="menu-footer-container"><ul id="menu-footer" class="cf"><li id="menu-item-54" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-54"><a href="https://datamix.co.jp/company/">会社概要</a></li>
<li id="menu-item-53" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-53"><a href="https://datamix.co.jp/terms-of-service/">利用規約</a></li>
<li id="menu-item-52" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-52"><a href="https://datamix.co.jp/privacy-policy/">個人情報保護方針</a></li>
<li id="menu-item-51" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-51"><a href="https://datamix.co.jp/act-on-specified-commercial-transaction/">特定商取引法に基づく表記</a></li>
<li id="menu-item-146" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-146"><a href="https://datamix.co.jp/form/">お問い合わせ</a></li>
</ul></div> </nav>
</div>
<p class="ft_robo"><img src="https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/lobo_01.png" alt=""></p>
</div>
</div>
</footer>
</div>
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/main.js"></script>
<!--script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script-->
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/jquery.bgswitcher.js"></script>
<!--<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/tab.js"></script>-->
<script>
// Tab switcher: the .tab li headers drive the panels inside .tab_target through
// .tab_target5 in parallel (panel N in every container is shown together).
// Refactored: the original repeated every selector five times; the behavior is
// identical, each container is just visited in the same order via a loop.
$(function() {
var containers = ['.tab_target', '.tab_target2', '.tab_target3', '.tab_target4', '.tab_target5'];
// Hide every panel, then reveal the first panel of each container.
$.each(containers, function(_, sel) {
$(sel + ' .tab_wrap').css('display','none');
$(sel + ' > .tab_wrap:first').css('display','block');
});
// The first tab starts selected.
$('.tab li:first').addClass('select');
$('.tab li').click(function() {
// 0-based position of the clicked tab.
var index = $('.tab li').index(this);
// Hide everything first (as the original did), then fade in the panel at the
// clicked position in every container.
$.each(containers, function(_, sel) {
$(sel + ' .tab_wrap').css('display','none');
});
$.each(containers, function(_, sel) {
$(sel + ' .tab_wrap').eq(index).fadeIn("slow");
});
// Move the 'select' highlight to the clicked tab only.
$('.tab li').removeClass('select');
$(this).addClass('select');
});
});
</script>
<script type="text/javascript">
// Rotate the hero background through five slide images with a cross-fade.
jQuery(function($) {
$('.slider').bgSwitcher({
images: ['https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_01.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_02.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_03.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_04.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_05.jpg'],
interval: 5000,
effect: "fade"
});
});
$(function() {
// Instructor list: hovering a thumbnail in the "sub" list highlights the
// profile card with the matching data-card id in the "main" list.
var wireHover = function(subSelector, mainSelector) {
$(subSelector + ' li').hover(function(){
var cardId = $(this).data('thumbnail');
$(mainSelector + ' li.on').removeClass('on');
$(mainSelector + ' li[data-card="' + cardId + '"]').addClass('on');
});
};
wireHover('.instructors_sub', '.instructors_main');
wireHover('.instructors_sub2', '.instructors_main2');
// Make the round boxes square: set height equal to the rendered width.
var boxWidth = $('.round-box').outerWidth();
$('.round-box').css('height', boxWidth);
var round3Width = $('.roundsec.round3 .round-box').outerWidth();
$('.roundsec.round3 .round-box').css('height', round3Width);
});
</script>
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/jquery.tile.js"></script>
<script>
// Equalize card heights (jquery.tile) and, on small screens, hide the fixed
// bottom button while the footer is in view.
$(window).on('load resize', function(){
var windowWidth = $(window).width();
$(".blog-area ul li").tile();
// FIX: removed unused local `windowSm` (declared 750 but never read).
if (windowWidth > 750) {
$(".home-button ul li").tile();
$(".module-stepbox .module-stepbox-total_5 .module-stepbox-step").tile(5);
}
// FIX: use a namespaced .off().on() — the original re-bound an anonymous scroll
// handler on every resize, stacking duplicate handlers for the page lifetime.
$(window).off("scroll.spFixBtn").on("scroll.spFixBtn", function() {
// FIX: declared with var — these were accidental implicit globals.
var scrollHeight = $(document).height();
var scrollPosition = $(window).height() + $(window).scrollTop();
var footHeight = $("footer").innerHeight();
if (windowWidth < 750) {
// Once the viewport bottom reaches the footer, slide the button away.
if ( scrollHeight - scrollPosition <= footHeight ) {
$('.sp_fix_btn').slideUp();
} else {
$('.sp_fix_btn').slideDown();
}
}
});
});
</script>
<script>
// Desktop only (> 750px): position the header below the page banner / title
// label, then switch it to position:fixed at the top once the user scrolls past.
$(window).on('load resize', function(){
var windowWidth = $(window).width();
if (windowWidth > 750) {
var mainheight = $('.page_main_banner').innerHeight()-70;
// var mainheight = $('.page_main_banner').height()+130;
var lavelheight = $('.top_label').height();
// Whichever element exists wins; .top_label (present on this page) overrides
// the banner height. Relies on var hoisting: both branches assign the same
// function-scoped hdheight.
if(mainheight){
var hdheight = mainheight;
}
if(lavelheight){
var hdheight = lavelheight;
}
// NOTE(review): if neither element exists, hdheight stays undefined and the
// css()/comparison below silently misbehave — confirm every template has one.
$('header').css('top', hdheight);
var triggerNode = $("header");
// NOTE(review): this anonymous scroll handler is re-bound on every load/resize
// event, so duplicate handlers accumulate — consider a namespaced .off().on().
$(window).scroll(function () {
var value = $(this).scrollTop();
var triggerNodePosition = $(triggerNode).offset().top;
// If the current scroll position is below the trigger offset...
if (value > hdheight) {
// ...pin the header to the top of the viewport.
$('header').css({"top": 0,"position":"fixed"});
}else{
$('header').css({'top': hdheight, "position":"absolute"});
}
});
}
});
</script>
<script>
// Google Analytics loader (vendor snippet): installs the ga() command queue and
// async-loads analytics.js.
// NOTE(review): the <head> already contains a snippet that runs
// ga('create', 'UA-99319144-1', 'auto') and ga('send', 'pageview'), so this block
// most likely records every pageview twice — confirm and remove one of the two.
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create','UA-99319144-1','auto');ga('send','pageview');
</script>
<div style="display:none">
</div>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/jetpack/_inc/build/photon/photon.min.js?ver=20130122'></script>
<script type='text/javascript' src='https://s0.wp.com/wp-content/js/devicepx-jetpack.js?ver=201941'></script>
<script type='text/javascript' src='https://secure.gravatar.com/js/gprofiles.js?ver=2019Octaa'></script>
<script type='text/javascript'>
/* <![CDATA[ */
// Jetpack Gravatar Hovercards config, consumed by wpgroho.js loaded just below.
var WPGroHo = {"my_hash":""};
/* ]]> */
</script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/jetpack/modules/wpgroho.js?ver=4.9.3'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/jetpack/_inc/build/lazy-images/js/lazy-images.min.js?ver=6.8'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/wp-embed.min.js?ver=4.9.3'></script>
<script type='text/javascript' src='https://stats.wp.com/e-201941.js' async='async' defer='defer'></script>
<script type='text/javascript'>
_stq = window._stq || [];
_stq.push([ 'view', {v:'ext',j:'1:6.8',blog:'155035170',post:'0',tz:'9',srv:'datamix.co.jp'} ]);
_stq.push([ 'clickTrackerInit', '155035170', '0' ]);
</script>
<!-- リマーケティング タグの Google コード -->
<!--------------------------------------------------
リマーケティング タグは、個人を特定できる情報と関連付けることも、デリケートなカテゴリに属するページに設置することも許可されません。タグの設定方法については、こちらのページをご覧ください。
http://google.com/ads/remarketingsetup
--------------------------------------------------->
<script type="text/javascript">
/* <![CDATA[ */
var google_conversion_id = 852033649;
var google_custom_params = window.google_tag_params;
var google_remarketing_only = true;
/* ]]> */
</script>
<script type="text/javascript" src="//www.googleadservices.com/pagead/conversion.js">
</script>
<noscript>
<div style="display:inline;">
<img height="1" width="1" style="border-style:none;" alt="" src="//googleads.g.doubleclick.net/pagead/viewthroughconversion/852033649/?guid=ON&script=0"/>
</div>
</noscript>
<!-- Yahoo Code for your Target List -->
<script type="text/javascript" language="javascript">
/* <![CDATA[ */
var yahoo_retargeting_id = '4F2M18WOUC';
var yahoo_retargeting_label = '';
var yahoo_retargeting_page_type = '';
var yahoo_retargeting_items = [{item_id: '', category_id: '', price: '', quantity: ''}];
/* ]]> */
</script>
<script type="text/javascript" language="javascript" src="//b92.yahoo.co.jp/js/s_retargeting.js"></script>
<script type="text/javascript">
//Configure an instance for your database
var td = new Treasure({
host: 'in.treasuredata.com',
writeKey: '9610/410a6a4e59ee7703f203ba2c070721601c08a013',
database: 'datamix_marketing',
startInSignedMode: true
});
// Enable cross-domain tracking
td.set('$global', 'td_global_id', 'td_global_id');
// Track pageview information into table
td.trackPageview('pageviews');
</script>
</body>
</html>
"""
DATAMIX_BLOG_SOURCE = """
<!DOCTYPE html>
<html lang="ja"
itemscope
itemtype="http://schema.org/Article"
prefix="og: http://ogp.me/ns#" class="no-js">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="google-site-verification" content="R1OoJj7cg0JS9YC_7mCZQ3mzzA2Qe1gIn4_IJurT1X4" />
<link rel="shortcut icon" href="/favicon.ico">
<link rel="icon" type="image/png" sizes="32x32" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-16x16.png">
<link rel="apple-touch-icon" sizes="57x57" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-57x57.png">
<link rel="apple-touch-icon" sizes="60x60" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-60x60.png">
<link rel="apple-touch-icon" sizes="72x72" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-72x72.png">
<link rel="apple-touch-icon" sizes="76x76" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-76x76.png">
<link rel="apple-touch-icon" sizes="114x114" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-114x114.png">
<link rel="apple-touch-icon" sizes="120x120" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-120x120.png">
<link rel="apple-touch-icon" sizes="144x144" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-144x144.png">
<link rel="apple-touch-icon" sizes="152x152" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-152x152.png">
<link rel="apple-touch-icon" sizes="180x180" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-180x180.png">
<link rel="icon" type="image/png" sizes="192x192" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/android-icon-192x192.png">
<meta name="msapplication-TileColor" content="#ffffff">
<meta name="msapplication-TileImage" content="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/ms-icon-144x144.png">
<link href="https://fonts.googleapis.com/css?family=Roboto:400,900" rel="stylesheet">
<link rel="stylesheet" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/style.css">
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/modernizr-custom.js"></script>
<title>採用情報 | データサイエンティストを目指すならデータミックス</title>
<!-- Facebook Pixel Code -->
<script>
!function(f,b,e,v,n,t,s){if(f.fbq)return;n=f.fbq=function(){n.callMethod?
n.callMethod.apply(n,arguments):n.queue.push(arguments)};if(!f._fbq)f._fbq=n;
n.push=n;n.loaded=!0;n.version='2.0';n.queue=[];t=b.createElement(e);t.async=!0;
t.src=v;s=b.getElementsByTagName(e)[0];s.parentNode.insertBefore(t,s)}(window,
document,'script','https://connect.facebook.net/en_US/fbevents.js');
fbq('init', '760992884080078'); // Insert your pixel ID here.
fbq('track', 'PageView');
</script>
<noscript><img height="1" width="1" style="display:none"
src="https://www.facebook.com/tr?id=760992884080078&ev=PageView&noscript=1"
/></noscript>
<!-- DO NOT MODIFY -->
<!-- End Facebook Pixel Code -->
<!-- All in One SEO Pack 2.4.3 by Michael Torbert of Semper Fi Web Design[2736,2767] -->
<meta name="description" content="データサイエンティストや、スクール運営、バックオフィス、マーケティング担当など株式会社データミックスの採用情報。" />
<link rel="canonical" href="https://datamix.co.jp/recruit/" />
<meta property="og:title" content="採用情報 " />
<meta property="og:type" content="article" />
<meta property="og:url" content="https://datamix.co.jp/recruit/" />
<meta property="og:image" content="https://datamix.xsrv.jp/wp-content/uploads/2017/05/datamix_ogp.png" />
<meta property="og:site_name" content="データサイエンティストを目指すならデータミックス" />
<meta property="fb:app_id" content="128471241040393" />
<meta property="og:description" content="データサイエンティストや、スクール運営、バックオフィス、マーケティング担当など株式会社データミックスの採用情報。" />
<meta property="article:published_time" content="2017-10-18T10:59:14Z" />
<meta property="article:modified_time" content="2019-12-25T17:10:07Z" />
<meta name="twitter:card" content="summary" />
<meta name="twitter:title" content="採用情報 " />
<meta name="twitter:description" content="データサイエンティストや、スクール運営、バックオフィス、マーケティング担当など株式会社データミックスの採用情報。" />
<meta name="twitter:image" content="https://datamix.xsrv.jp/wp-content/uploads/2017/05/datamix_ogp.png" />
<meta itemprop="image" content="https://datamix.xsrv.jp/wp-content/uploads/2017/05/datamix_ogp.png" />
<!-- /all in one seo pack -->
<link rel='dns-prefetch' href='//s.w.org' />
<script type="text/javascript">
window._wpemojiSettings = {"baseUrl":"https:\/\/s.w.org\/images\/core\/emoji\/2.4\/72x72\/","ext":".png","svgUrl":"https:\/\/s.w.org\/images\/core\/emoji\/2.4\/svg\/","svgExt":".svg","source":{"concatemoji":"https:\/\/datamix.co.jp\/test\/wp-includes\/js\/wp-emoji-release.min.js?ver=4.9.3"}};
!function(a,b,c){function d(a,b){var c=String.fromCharCode;l.clearRect(0,0,k.width,k.height),l.fillText(c.apply(this,a),0,0);var d=k.toDataURL();l.clearRect(0,0,k.width,k.height),l.fillText(c.apply(this,b),0,0);var e=k.toDataURL();return d===e}function e(a){var b;if(!l||!l.fillText)return!1;switch(l.textBaseline="top",l.font="600 32px Arial",a){case"flag":return!(b=d([55356,56826,55356,56819],[55356,56826,8203,55356,56819]))&&(b=d([55356,57332,56128,56423,56128,56418,56128,56421,56128,56430,56128,56423,56128,56447],[55356,57332,8203,56128,56423,8203,56128,56418,8203,56128,56421,8203,56128,56430,8203,56128,56423,8203,56128,56447]),!b);case"emoji":return b=d([55357,56692,8205,9792,65039],[55357,56692,8203,9792,65039]),!b}return!1}function f(a){var c=b.createElement("script");c.src=a,c.defer=c.type="text/javascript",b.getElementsByTagName("head")[0].appendChild(c)}var g,h,i,j,k=b.createElement("canvas"),l=k.getContext&&k.getContext("2d");for(j=Array("flag","emoji"),c.supports={everything:!0,everythingExceptFlag:!0},i=0;i<j.length;i++)c.supports[j[i]]=e(j[i]),c.supports.everything=c.supports.everything&&c.supports[j[i]],"flag"!==j[i]&&(c.supports.everythingExceptFlag=c.supports.everythingExceptFlag&&c.supports[j[i]]);c.supports.everythingExceptFlag=c.supports.everythingExceptFlag&&!c.supports.flag,c.DOMReady=!1,c.readyCallback=function(){c.DOMReady=!0},c.supports.everything||(h=function(){c.readyCallback()},b.addEventListener?(b.addEventListener("DOMContentLoaded",h,!1),a.addEventListener("load",h,!1)):(a.attachEvent("onload",h),b.attachEvent("onreadystatechange",function(){"complete"===b.readyState&&c.readyCallback()})),g=c.source||{},g.concatemoji?f(g.concatemoji):g.wpemoji&&g.twemoji&&(f(g.twemoji),f(g.wpemoji)))}(window,document,window._wpemojiSettings);
</script>
<style type="text/css">
img.wp-smiley,
img.emoji {
display: inline !important;
border: none !important;
box-shadow: none !important;
height: 1em !important;
width: 1em !important;
margin: 0 .07em !important;
vertical-align: -0.1em !important;
background: none !important;
padding: 0 !important;
}
</style>
<link rel='stylesheet' id='contact-form-7-css' href='https://datamix.co.jp/test/wp-content/plugins/contact-form-7/includes/css/styles.css?ver=5.1.5' type='text/css' media='all' />
<link rel='stylesheet' id='fvch-styles-css' href='https://datamix.co.jp/test/wp-content/plugins/fv-code-highlighter/public/css/fvch-styles-dark.min.css?ver=1.2' type='text/css' media='all' />
<link rel='stylesheet' id='__EPYT__style-css' href='https://datamix.co.jp/test/wp-content/plugins/youtube-embed-plus/styles/ytprefs.min.css?ver=13.1.2.5' type='text/css' media='all' />
<style id='__EPYT__style-inline-css' type='text/css'>
.epyt-gallery-thumb {
width: 33.333%;
}
</style>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/jquery/jquery.js?ver=1.12.4'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/jquery/jquery-migrate.min.js?ver=1.4.1'></script>
<script type='text/javascript'>
/* <![CDATA[ */
var _EPYT_ = {"ajaxurl":"https:\/\/datamix.co.jp\/test\/wp-admin\/admin-ajax.php","security":"e521b7db4c","gallery_scrolloffset":"20","eppathtoscripts":"https:\/\/datamix.co.jp\/test\/wp-content\/plugins\/youtube-embed-plus\/scripts\/","eppath":"https:\/\/datamix.co.jp\/test\/wp-content\/plugins\/youtube-embed-plus\/","epresponsiveselector":"[\"iframe[src*='youtube.com']\",\"iframe[src*='youtube-nocookie.com']\",\"iframe[data-ep-src*='youtube.com']\",\"iframe[data-ep-src*='youtube-nocookie.com']\",\"iframe[data-ep-gallerysrc*='youtube.com']\"]","epdovol":"1","version":"13.1.2.5","evselector":"iframe.__youtube_prefs__[src], iframe[src*=\"youtube.com\/embed\/\"], iframe[src*=\"youtube-nocookie.com\/embed\/\"]","ajax_compat":"","ytapi_load":"light","stopMobileBuffer":"1","vi_active":"","vi_js_posttypes":[]};
/* ]]> */
</script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/youtube-embed-plus/scripts/ytprefs.min.js?ver=13.1.2.5'></script>
<link rel='https://api.w.org/' href='https://datamix.co.jp/wp-json/' />
<link rel="EditURI" type="application/rsd+xml" title="RSD" href="https://datamix.co.jp/test/xmlrpc.php?rsd" />
<link rel="wlwmanifest" type="application/wlwmanifest+xml" href="https://datamix.co.jp/test/wp-includes/wlwmanifest.xml" />
<meta name="generator" content="WordPress 4.9.3" />
<link rel='shortlink' href='https://datamix.co.jp/?p=999' />
<link rel="alternate" type="application/json+oembed" href="https://datamix.co.jp/wp-json/oembed/1.0/embed?url=https%3A%2F%2Fdatamix.co.jp%2Frecruit%2F" />
<link rel="alternate" type="text/xml+oembed" href="https://datamix.co.jp/wp-json/oembed/1.0/embed?url=https%3A%2F%2Fdatamix.co.jp%2Frecruit%2F&format=xml" />
<style type="text/css">
.fvch-codeblock {
background: #2e2e2d !important;
background-position-y: 4px !important;
}
.fvch-codeblock pre, .fvch-line-number {
line-height: 1.5em !important;
font-family: 'Monaco', 'Courier New', Courier, monospace !important;
font-size: 0.8em !important;
}
</style>
<meta name="generator" content="FV Code Highlighter - https://frankverhoeven.me/"><!-- Markup (JSON-LD) structured in schema.org ver.4.1.8 START -->
<script type="application/ld+json">
{
"@context": "http://schema.org",
"@type": "BreadcrumbList",
"itemListElement": [
{
"@type": "ListItem",
"position": 1,
"item": {
"@id": "https://datamix.co.jp",
"name": "データサイエンティストを目指すならデータミックス"
}
},
{
"@type": "ListItem",
"position": 2,
"item": {
"@id": "https://datamix.co.jp/recruit/",
"name": "採用情報"
}
}
]
}
</script>
<!-- Markup (JSON-LD) structured in schema.org END -->
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css">
<script src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>
//hljs.initHighlightingOnLoad();
$(function()
{
$( 'pre' ).each(function( i , block )
{
hljs.highlightBlock( block ) ;
} ) ;
} ) ;
</script>
<script>
//スムーズスクロール
jQuery(function(){
// #で始まるアンカーをクリックした場合に処理
jQuery('a[href^=#]').click(function() {
// スクロールの速度
var speed = 400; // ミリ秒
// アンカーの値取得
var href= jQuery(this).attr("href");
// 移動先を取得
var target = jQuery(href == "#" || href == "" ? 'html' : href);
// 移動先を数値で取得
var position = target.offset().top;
// スムーススクロール
jQuery('body,html').animate({scrollTop:position}, speed, 'swing');
return false;
});
});
</script>
<!-- Google Tag Manager -->
<script>(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-ND9M7C5');</script>
<!-- End Google Tag Manager -->
<!-- Treasure Data -->
<script type="text/javascript">
!function(t,e){if(void 0===e[t]){e[t]=function(){e[t].clients.push(this),this._init=[Array.prototype.slice.call(arguments)]},e[t].clients=[];for(var r=function(t){return function(){return this["_"+t]=this["_"+t]||[],this["_"+t].push(Array.prototype.slice.call(arguments)),this}},s=["blockEvents","unblockEvents","setSignedMode","setAnonymousMode","resetUUID","addRecord","fetchGlobalID","set","trackEvent","trackPageview","trackClicks","ready","fetchUserSegments"],n=0;n<s.length;n++){var c=s[n];e[t].prototype[c]=r(c)}var o=document.createElement("script");o.type="text/javascript",o.async=!0,o.src=("https:"===document.location.protocol?"https:":"http:")+"//cdn.treasuredata.com/sdk/2.1/td.min.js";var a=document.getElementsByTagName("script")[0];a.parentNode.insertBefore(o,a)}}("Treasure",this);
</script>
<!-- Global site tag (gtag.js) - Google Ads: 685073848 -->
<script async src="https://www.googletagmanager.com/gtag/js?id=AW-685073848"></script>
<script> window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'AW-685073848');</script>
</head>
<body class="page-template page-template-templates page-template-recruit page-template-templatesrecruit-php page page-id-999 elementor-default">
<header>
<div class="cf">
<div class="logo __desktop __other">
<a href="https://datamix.co.jp/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a>
</div>
<nav id="Navigation" class="a-topnav transition_quick">
<a class="home_menu hidden_dt" href="/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a>
<div class="menu-main-container"><ul id="menu-main" class="cf"><li id="menu-item-1024" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-has-children menu-item-1024"><a href="#">スクール<span>SCHOOL</span></a>
<ul class="sub-menu">
<li id="menu-item-4332" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-4332"><a href="https://datamix.co.jp/data-scientist/">データサイエンティスト育成コース</a></li>
<li id="menu-item-2282" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-2282"><a href="https://datamix.co.jp/introductory-data-scientist-course/">データサイエンティスト準備ステップ</a></li>
<li id="menu-item-3932" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-3932"><a href="https://datamix.co.jp/tdi-course/">英語プログラム- Essential Tools</a></li>
</ul>
</li>
<li id="menu-item-1368" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1368"><a href="https://datamix.co.jp/for-employer/">人材紹介<span>For Employer</span></a></li>
<li id="menu-item-1005" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1005"><a href="https://datamix.co.jp/for-company/">法人向けサービス<span>For Company</span></a></li>
<li id="menu-item-1026" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-has-children menu-item-1026"><a href="#">ニュース<span>NEWS</span></a>
<ul class="sub-menu">
<li id="menu-item-1027" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-1027"><a href="/news">ニュース</a></li>
<li id="menu-item-1317" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-1317"><a href="/event">イベント</a></li>
</ul>
</li>
<li id="menu-item-4015" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-4015"><a href="https://datamix.co.jp/faq/">FAQ<span>FAQ</span></a></li>
<li id="menu-item-1029" class="menu-item menu-item-type-post_type menu-item-object-page current-menu-item page_item page-item-999 current_page_item menu-item-1029"><a href="https://datamix.co.jp/recruit/">採用情報<span>recruit</span></a></li>
<li id="menu-item-1030" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1030"><a href="https://datamix.co.jp/company/">会社概要<span>about us</span></a></li>
</ul></div>
</nav>
<!-- <div class="btn __other __desktop">
<div class="button_hvr yellow"><a href="https://datamix.co.jp/form-seminor/" class="hvr-shutter-out-horizontal"><span class="icon-plane"><b>説明会に申し込む</b></span></a></div> </div>
-->
<div id="MenuIcon" class="menu_button __mobile">
<div>
<div>
<span></span>
<span></span>
<span></span>
<span></span>
</div>
</div>
<span class="roboto uppercase">menu</span>
</div><!--2-->
<!--<div class="information transition_quick __mobile">
<div class="cta twin">
</div>
</div>-->
</div>
</header>
<div id="wrapper">
<link rel="stylesheet" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/css/recruit.css">
<div class="top_label">
<div>
<h1>採用情報</h1>
<p class="roboto uppercase">RECRUIT</p>
</div>
</div>
<div class="content_section">
<p class="breadcrumb"><a href="https://datamix.co.jp/">HOME</a> > <a href="https://datamix.co.jp/recruit/">採用情報</a></p>
</div>
<div class="page-template-blog" style="border-bottom: 1px solid #d4eaf6;">
<div class="blogs content_section cf">
<div class="upper">
<p>私たちデータミックスは、「データサイエンスを当たり前に」することで、データの活用を促し、より良い社会を実現したいと本気で考えています。データミックスはそんな未来を一緒に創っていける仲間に出会えるのを楽しみにしています。</p>
</div>
<div class="message-area">
<div class="content_section_instructor default_page_fontsize">
<h2 class="page_sub_title blue">メンバーからのメッセージ</h2>
<p class="page_main_title blue roboto uppercase"><b>Message</b></p>
<div class="instrucors_box cf">
<ul class="instructors_main">
<li data-card="1" class="on">
<div class="gradient"></div>
<div class="instructors_detail">
<div class="img_wrapper"><img src="https://datamix.co.jp/test/wp-content/uploads/2017/04/d_instructor_katada02-390x491.jpg" alt=""></div>
<div class="text_wrapper">
<div class="position">
<h3 class="name-inst">堅田 洋資<br>
<span>Yosuke Katada</span></h3>
<p class="inst_role">ブートキャンプステップ、ベーシックステップ、アドバンスステップ「レコメンデーション」クラス</p>
<p>代表取締役社長<br />
データサイエンティスト<br />
<br />
データミックスではクライアントの課題を自分事だと捉え、技術的に難易度の高い課題にチャレンジすることを奨励しています。受講生・卒業生の皆さんやクライアント、そしてメンバーと一緒に成長していきたい方の応募をお待ちしています。</p>
</div>
</div>
</div>
</li>
<li data-card="2">
<div class="gradient"></div>
<div class="instructors_detail">
<div class="img_wrapper"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/10/20180415-2435-e1540443636597-390x491.jpg" alt=""></div>
<div class="text_wrapper">
<div class="position">
<h3 class="name-inst">石井ゆり香<br>
<span>Yurika Ishii</span></h3>
<p class="inst_role">マネージャー</p>
<p>大学卒業後、メーカー、広告制作会社、システムコンサルティング会社で経理を担当。2018年よりデータミックスに参画。<br />
<br />
いろいろな経験を積みたい方、スピード感を持って前向きに仕事に取り組みたい方のご応募をお待ちしています。一緒に会社を盛り上げていきましょう!<br />
</p>
</div>
</div>
</div>
</li>
<li data-card="3">
<div class="gradient"></div>
<div class="instructors_detail">
<div class="img_wrapper"><img src="https://datamix.co.jp/test/wp-content/uploads/2017/10/recruit_miyoshi01-390x491.jpg" alt=""></div>
<div class="text_wrapper">
<div class="position">
<h3 class="name-inst">三好大悟<br>
<span>Daigo Miyoshi</span></h3>
<p class="inst_role">データサイエンティスト</p>
<p>慶応義塾大学理工学部管理工学科 卒業<br />
<br />
データサイエンティストは、やる気と熱意が一番大事だと感じています。私自身まだまだ未熟ですが、良きライバルとなって切磋琢磨できる方・仕事に全力投球できる方と仕事ができたらと思います!一緒にデータミックスを日本一のデータサイエンティスト集団にしましょう!</p>
</div>
</div>
</div>
</li>
<li data-card="4">
<div class="gradient"></div>
<div class="instructors_detail">
<div class="img_wrapper"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4728-e1545289354704-390x491.jpg" alt=""></div>
<div class="text_wrapper">
<div class="position">
<h3 class="name-inst">渡部孝一<br>
<span>Koichi Watabe</span></h3>
<p class="inst_role">人事マネージャー</p>
<p>エンジニアからキャリアをスタートし、コンサルティングファーム 、IT企業にて、採用、育成、制度、組織開発、労務と幅広く人材マネジメントに従事。2018年よりデータミックスに参画。<br />
当社のミッションビジョンに共感し、「データサイエンス」という新しい市場を一緒に創り上げるパイオニア精神をお持ちの方、ご応募お待ちしております!<br />
</p>
</div>
</div>
</div>
</li>
<li data-card="5">
<div class="gradient"></div>
<div class="instructors_detail">
<div class="img_wrapper"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4856-e1545289569276-390x491.jpg" alt=""></div>
<div class="text_wrapper">
<div class="position">
<h3 class="name-inst">清水嵩文<br>
<span>Takafumi Shimizu</span></h3>
<p class="inst_role">データ分析コンサルタント</p>
<p>前職ではベンチャー企業でインターネット広告の営業をし、2018年からデータミックスに参画。<br />
データミックスにはスタートアップならではの自由があるので、自分らしく働きたい人にあってると思います!<br />
一緒にデータサイエンスを楽しんで、学んで、活用していきましょう!</p>
</div>
</div>
</div>
</li>
<li data-card="6">
<div class="gradient"></div>
<div class="instructors_detail">
<div class="img_wrapper"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4741-e1545289322125-390x491.jpg" alt=""></div>
<div class="text_wrapper">
<div class="position">
<h3 class="name-inst">亀岡瑶<br>
<span>Yo Kameoka</span></h3>
<p class="inst_role">データ分析コンサルタント</p>
<p>食品CROにて統計解析職に従事、2018年データミックスに参画。<br />
データミックスでは、教育事業とコンサルティング事業を通じて、確かな知識とスキルを身につけることができます!<br />
スピード感がある環境で、共に刺激し成長していける方をお待ちしています!</p>
</div>
</div>
</div>
</li>
<li data-card="7">
<div class="gradient"></div>
<div class="instructors_detail">
<div class="img_wrapper"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4904-e1545292200368-390x491.jpg" alt=""></div>
<div class="text_wrapper">
<div class="position">
<h3 class="name-inst">高橋由佳<br>
<span>Yuka Takahashi</span></h3>
<p class="inst_role">スクール運営 サービス&アクイジション担当</p>
<p>ブライダル業界を経て、IT企業・広報担当責任者などのマネジメント業務に従事。2018年からデータミックスに参画。<br />
<br />
仕事を全力で楽しみながら、切磋琢磨しあう私たちです。<br />
受講生・卒業生の皆さんとのコミュニケーションからも、たくさんのことを学べます。<br />
データミックスを一緒に盛り上げてくれる方、ご応募お待ちしております。<br />
<br />
</p>
</div>
</div>
</div>
</li>
</ul>
<ul class="instructors_sub cf">
<li data-thumbnail="1"><img src="https://datamix.co.jp/test/wp-content/uploads/2017/04/d_instructor_katada02-125x168.jpg" alt=""></li>
<li data-thumbnail="2"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/10/20180415-2435-e1540443636597-125x168.jpg" alt=""></li>
<li data-thumbnail="3"><img src="https://datamix.co.jp/test/wp-content/uploads/2017/10/recruit_miyoshi01-125x168.jpg" alt=""></li>
<li data-thumbnail="4"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4728-e1545289354704-125x168.jpg" alt=""></li>
<li data-thumbnail="5"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4856-e1545289569276-125x168.jpg" alt=""></li>
<li data-thumbnail="6"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4741-e1545289322125-125x168.jpg" alt=""></li>
<li data-thumbnail="7"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4904-e1545292200368-125x168.jpg" alt=""></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="content_section page-template-company">
<div class="company-recruit">
<h2 class="page_sub_title blue">採用情報</h2>
<p class="page_main_title blue roboto uppercase"><b>Recruit</b></p>
<div class="wantedly content_section cf">
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/91659/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/110548/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/260266/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/217897/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/311975/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/409755/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/409750/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/399566/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/415750/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/415757"></iframe>
</div>
</div>
</div>
</div>
<div class="blog-area">
<div class="wrap">
<h2 class="page_sub_title blue">ブログ</h2>
<p class="page_main_title blue roboto uppercase"><b>Blog</b></p>
<div class="inner">
<ul class="clearfix">
<li class="clearfix">
<div class="image"><img src="https://datamix.co.jp/test/wp-content/uploads/2020/02/DSC04901-e1581986270347-140x140.jpg" alt=""></div>
<h3 class="h4"><span class="date">2020.02.17</span><a href="https://datamix.co.jp/interview-fujita-coo/">「データサイエンスはMBA以上の武器になる」- データミックスCOO藤田</a></h3>
</li>
<li class="clearfix">
<div class="image"><img src="https://datamix.co.jp/test/wp-content/uploads/2020/02/4432c5645dbce8524c751750ac89e05b-140x140.jpg" alt=""></div>
<h3 class="h4"><span class="date">2020.02.14</span><a href="https://datamix.co.jp/dtst_shimizu/">清水 嵩文_データサイエンティスト育成のフロンティア_インストラクター紹介</a></h3>
</li>
<li class="clearfix">
<div class="image"><img src="https://datamix.co.jp/test/wp-content/uploads/2020/02/Screen-Shot-2020-02-07-at-19.00.18-140x140.png" alt=""></div>
<h3 class="h4"><span class="date">2020.02.06</span><a href="https://datamix.co.jp/blog-what-is-data-science/">データサイエンス(Data Science)とは?</a></h3>
</li>
<li class="clearfix">
<div class="image"><img src="https://datamix.co.jp/test/wp-content/uploads/2020/02/n_23ku_2019-140x140.png" alt=""></div>
<h3 class="h4"><span class="date">2020.02.04</span><a href="https://datamix.co.jp/blog-taiki-jidou/">保育園に入りやすい区はどこ? 〜23区別「待機児童の状況」の変化〜</a></h3>
</li>
</ul>
</div>
<div class="home-news">
<div class="content_section">
<div class="single">
<div class="button_hvr yellow"><a href="/blog" class="hvr-shutter-out-horizontal"><span class="icon-box3"><b>もっと見る</b></span></a></div>
</div>
</div>
</div>
</div>
</div>
<footer>
<div>
<div class="cf">
<div class="logo">
<a href="/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a><br>
<small class="copyright">©2017 DataMix</small>
</div>
<div class="ft_r">
<nav class="a-footer">
<div class="menu-ftm_new-container"><ul id="menu-ftm_new" class="cf"><li id="menu-item-1039" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-1039"><a href="/news">ニュース</a></li>
<li id="menu-item-4016" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-4016"><a href="https://datamix.co.jp/faq/">スクールに関するFAQ</a></li>
</ul></div> </nav>
<nav class="a-footer">
<div class="menu-footer-container"><ul id="menu-footer" class="cf"><li id="menu-item-54" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-54"><a href="https://datamix.co.jp/company/">会社概要</a></li>
<li id="menu-item-53" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-53"><a href="https://datamix.co.jp/terms-of-service/">利用規約</a></li>
<li id="menu-item-52" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-52"><a href="https://datamix.co.jp/privacy-policy/">プライバシーポリシー</a></li>
<li id="menu-item-51" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-51"><a href="https://datamix.co.jp/act-on-specified-commercial-transaction/">特定商取引法に基づく表記</a></li>
<li id="menu-item-146" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-146"><a href="https://datamix.co.jp/form/">お問い合わせ</a></li>
</ul></div> </nav>
</div>
<p class="ft_robo"><img src="https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/lobo_01.png" alt=""></p>
</div>
</div>
</footer>
</div>
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/main.js"></script>
<!--script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script-->
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/jquery.bgswitcher.js"></script>
<!--<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/tab.js"></script>-->
<script>
$(function() {
//クリックしたときのファンクションをまとめて指定
$('.tab_target .tab_wrap').css('display','none');
$('.tab_target > .tab_wrap:first').css('display','block');
$('.tab_target2 .tab_wrap').css('display','none');
$('.tab_target2 > .tab_wrap:first').css('display','block');
$('.tab_target3 .tab_wrap').css('display','none');
$('.tab_target3 > .tab_wrap:first').css('display','block');
$('.tab_target4 .tab_wrap').css('display','none');
$('.tab_target4 > .tab_wrap:first').css('display','block');
$('.tab_target5 .tab_wrap').css('display','none');
$('.tab_target5 > .tab_wrap:first').css('display','block');
$('.tab li:first').addClass('select');
$('.tab li').click(function() {
//.index()を使いクリックされたタブが何番目かを調べ、
//indexという変数に代入します。
var index = $('.tab li').index(this);
//コンテンツを一度すべて非表示にし、
$('.tab_target .tab_wrap').css('display','none');
$('.tab_target2 .tab_wrap').css('display','none');
$('.tab_target3 .tab_wrap').css('display','none');
$('.tab_target4 .tab_wrap').css('display','none');
$('.tab_target5 .tab_wrap').css('display','none');
//クリックされたタブと同じ順番のコンテンツを表示します。
$('.tab_target .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target2 .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target3 .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target4 .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target5 .tab_wrap').eq(index).fadeIn("slow");
//一度タブについているクラスselectを消し、
$('.tab li').removeClass('select');
//クリックされたタブのみにクラスselectをつけます。
$(this).addClass('select');
});
});
</script>
<script type="text/javascript">
jQuery(function($) {
$('.slider').bgSwitcher({
images: ['https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_01.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_02.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_03.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_04.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_05.jpg'],
interval: 5000,
effect: "fade"
});
});
$(function() {
$ ('.instructors_sub li').hover(function(){
var _self = $(this);
var id = _self.data('thumbnail');
$('.instructors_main li.on').removeClass('on');
$('.instructors_main li[data-card="' + id + '"]').addClass('on');
});
$ ('.instructors_sub2 li').hover(function(){
var _self = $(this);
var id = _self.data('thumbnail');
$('.instructors_main2 li.on').removeClass('on');
$('.instructors_main2 li[data-card="' + id + '"]').addClass('on');
});
var rWidth = $('.round-box').outerWidth();
$('.round-box').css('height', rWidth);
var rWidth2 = $('.roundsec.round3 .round-box').outerWidth();
$('.roundsec.round3 .round-box').css('height', rWidth2);
});
</script>
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/jquery.tile.js"></script>
<script>
$(window).on('load resize', function(){
var windowWidth = $(window).width();
$(".blog-area ul li").tile();
var windowSm = 750;
if (windowWidth > 750) {
$(".home-button ul li").tile();
$(".module-stepbox .module-stepbox-total_5 .module-stepbox-step").tile(5);
}
$(window).on("scroll", function() {
scrollHeight = $(document).height();
scrollPosition = $(window).height() + $(window).scrollTop();
footHeight = $("footer").innerHeight();
if (windowWidth < 750) {
if ( scrollHeight - scrollPosition <= footHeight ) {
$('.sp_fix_btn').slideUp();
} else {
$('.sp_fix_btn').slideDown();
}
}
});
});
</script>
<script>
$(window).on('load resize', function(){
var windowWidth = $(window).width();
if (windowWidth > 750) {
var mainheight = $('.page_main_banner').innerHeight()-70;
// var mainheight = $('.page_main_banner').height()+130;
var lavelheight = $('.top_label').height();
if(mainheight){
var hdheight = mainheight;
}
if(lavelheight){
var hdheight = lavelheight;
}
$('header').css('top', hdheight);
var triggerNode = $("header");
$(window).scroll(function () {
var value = $(this).scrollTop();
var triggerNodePosition = $(triggerNode).offset().top;
// 現在のスクロール位置が引き金要素の位置より下にあれば‥
if (value > hdheight) {
// なんらかの命令を実行
$('header').css({"top": 0,"position":"fixed"});
}else{
$('header').css({'top': hdheight, "position":"absolute"});
}
});
}
});
</script>
<script>
/*
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create','UA-99319144-1','auto');
ga('linker:autoLink', ['datamix.co.jp','datamix-info.resv.jp']);
ga('send','pageview');
*/
</script>
<script type='text/javascript'>
/* <![CDATA[ */
var codePrettifyLoaderBaseUrl = "https:\/\/datamix.co.jp\/test\/wp-content\/plugins\/code-prettify\/prettify";
/* ]]> */
</script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/code-prettify/prettify/run_prettify.js?ver=1.4.0'></script>
<script type='text/javascript'>
/* <![CDATA[ */
var wpcf7 = {"apiSettings":{"root":"https:\/\/datamix.co.jp\/wp-json\/contact-form-7\/v1","namespace":"contact-form-7\/v1"}};
/* ]]> */
</script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/contact-form-7/includes/js/scripts.js?ver=5.1.5'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/youtube-embed-plus/scripts/fitvids.min.js?ver=13.1.2.5'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/wp-embed.min.js?ver=4.9.3'></script>
<!-- リマーケティング タグの Google コード -->
<!--------------------------------------------------
リマーケティング タグは、個人を特定できる情報と関連付けることも、デリケートなカテゴリに属するページに設置することも許可されません。タグの設定方法については、こちらのページをご覧ください。
http://google.com/ads/remarketingsetup
--------------------------------------------------->
<script type="text/javascript">
/* <![CDATA[ */
var google_conversion_id = 852033649;
var google_custom_params = window.google_tag_params;
var google_remarketing_only = true;
/* ]]> */
</script>
<script type="text/javascript" src="//www.googleadservices.com/pagead/conversion.js">
</script>
<noscript>
<div style="display:inline;">
<img height="1" width="1" style="border-style:none;" alt="" src="//googleads.g.doubleclick.net/pagead/viewthroughconversion/852033649/?guid=ON&script=0"/>
</div>
</noscript>
<!-- Yahoo Code for your Target List -->
<script type="text/javascript" language="javascript">
/* <![CDATA[ */
var yahoo_retargeting_id = '4F2M18WOUC';
var yahoo_retargeting_label = '';
var yahoo_retargeting_page_type = '';
var yahoo_retargeting_items = [{item_id: '', category_id: '', price: '', quantity: ''}];
/* ]]> */
</script>
<script type="text/javascript" language="javascript" src="//b92.yahoo.co.jp/js/s_retargeting.js"></script>
<script type="text/javascript">
//Configure an instance for your database
var td = new Treasure({
host: 'in.treasuredata.com',
writeKey: '9610/410a6a4e59ee7703f203ba2c070721601c08a013',
database: 'datamix_marketing',
startInSignedMode: true
});
// Enable cross-domain tracking
td.set('$global', 'td_global_id', 'td_global_id');
// Track pageview information into table
td.trackPageview('pageviews');
</script>
</body>
</html>
"""
MEETUP_API_SOURCE = """
[{"created":1569657937000,"duration":7200000,"fee":{"accepts":"cash","amount":1000.0,"currency":"JPY","description":"","label":"Price","required":false},"id":"265234301","name":"レコメンデーション論文を読む!データミックスゼミ第3回","rsvp_limit":25,"date_in_series_pattern":false,"status":"upcoming","time":1572066000000,"local_date":"2019-10-26","local_time":"14:00","updated":1569657937000,"utc_offset":32400000,"waitlist_count":0,"yes_rsvp_count":4,"venue":{"id":26481303,"name":"データミックス","lat":35.69807815551758,"lon":139.756103515625,"repinned":true,"address_1":"Chiyoda City, Kanda Jinbōchō, 2-chōme−2−44","city":"Tōkyō-to","country":"jp","localized_country_name":"Japan"},"group":{"created":1539055790000,"name":"DataMix.Connect","id":30152644,"join_mode":"approval","lat":35.66999816894531,"lon":139.77000427246094,"urlname":"datamix","who":"メンバー","localized_location":"Tokyo, Japan","state":"","country":"jp","region":"en_US","timezone":"Asia/Tokyo"},"link":"https://www.meetup.com/datamix/events/265234301/","description":"<p>PythonやRのライブラリを使ってデータ分析をしたり、統計学や機械学習の基礎は理解できた。そして、さらなる飛躍に向けて、最先端の論文を読んでみたい・・・</p> <p>そんな皆様を対象に、データミックスゼミ(通称ゼミ)を実験的に始めます。</p> <p>【ゼミの内容】<br/>データミックスゼミでは、3ヶ月間にわたってテーマに沿った論文を複数読み、毎月末に集まって、論文の内容や今後読みたい論文などを話し合っていきます。また、集まらない期間は、わからないことを質問をし合ったり、「面白い!」と思ったことを投稿できるオンライン上のディスカッションボードを用意します。ゼミスタイルですので、ファシリテーターが中心となって、参加者どうしがディスカッションするスタイルです。</p> <p>詳細は、こちらをご覧ください。<br/><a href=\\"https://datamix-seminar.s3-ap-northeast-1.amazonaws.com/datamix-seminar-announce-20190627.pdf\\" class=\\"linkified\\">https://datamix-seminar.s3-ap-northeast-1.amazonaws.com/datamix-seminar-announce-20190627.pdf</a></p> <p>【7月から開始するゼミのテーマ】<br/>7月から開始するテーマはレコメンデーションと自然言語処理です。</p> <p>\u203bこのMeetupはレコメンデーションのキックオフです</p> <p>【スケジュール】<br/>レコメンデーション<br/>7/27(土) 14:00~16:00(キックオフ)(済み)<br/>8/31(土) 14:00~16:00(第1回目対面ゼミ)(済み)<br/>9/28(土) 14:00~16:00(第2回目対面ゼミ)(済み)<br/>10/26(土) 14:00~16:00(第3回目対面ゼミ)</p> <p>【対象者】<br/>データミックスのアドバンスステップを修了されている方で以下の1~3のいずれかに該当する方<br/>1. 機械学習や統計学に関する学術論文を読んでみたいと思っているが、読み方がわからない。<br/>2. 
一人で読んでも挫折しそう・・・<br/>3. 読んでて「面白い!」と思った部分を人と共有したい</p> <p>【次回読む論文】<br/>TEM: Tree-enhanced Embedding Model for Explainable Recommendation<br/><a href=\\"https://dl.acm.org/citation.cfm?id=3186066\\" class=\\"linkified\\">https://dl.acm.org/citation.cfm?id=3186066</a></p> <p>【ゲスト参加】<br/>もしご友人で興味がある方がいらっしゃいましたらぜひお誘いください<br/>\u203b在校生・卒業生1名につき2名まで</p> <p>【費用】<br/>各回1,000円(会場払)</p> ","visibility":"public","member_pay_fee":false},{"created":1569663801000,"duration":7200000,"fee":{"accepts":"cash","amount":1000.0,"currency":"JPY","description":"","label":"Price","required":false},"id":"265235011","name":"自然言語処理の論文を読む!データミックスゼミ第3回","rsvp_limit":30,"date_in_series_pattern":false,"status":"upcoming","time":1572076800000,"local_date":"2019-10-26","local_time":"17:00","updated":1569663801000,"utc_offset":32400000,"waitlist_count":0,"yes_rsvp_count":5,"venue":{"id":25967181,"name":"株式会社データミックス ","lat":35.69807052612305,"lon":139.7562713623047,"repinned":true,"address_1":"千代田区神田神保町2-44","address_2":"第2石坂ビル2階","city":"東京都","country":"jp","localized_country_name":"Japan"},"group":{"created":1539055790000,"name":"DataMix.Connect","id":30152644,"join_mode":"approval","lat":35.66999816894531,"lon":139.77000427246094,"urlname":"datamix","who":"メンバー","localized_location":"Tokyo, Japan","state":"","country":"jp","region":"en_US","timezone":"Asia/Tokyo"},"link":"https://www.meetup.com/datamix/events/265235011/","description":"<p>PythonやRのライブラリを使ってデータ分析をしたり、統計学や機械学習の基礎は理解できた。そして、さらなる飛躍に向けて、最先端の論文を読んでみたい・・・</p> <p>そんな皆様を対象に、データミックスゼミ(通称ゼミ)を実験的に始めます。</p> <p>【ゼミの内容】<br/>データミックスゼミでは、3ヶ月間にわたってテーマに沿った論文を複数読み、毎月末に集まって、論文の内容や今後読みたい論文などを話し合っていきます。また、集まらない期間は、わからないことを質問をし合ったり、「面白い!」と思ったことを投稿できるオンライン上のディスカッションボードを用意します。</p> <p>詳細は、こちらをご覧ください。<br/><a href=\\"https://datamix-seminar.s3-ap-northeast-1.amazonaws.com/datamix-seminar-announce-20190627.pdf\\" class=\\"linkified\\">https://datamix-seminar.s3-ap-northeast-1.amazonaws.com/datamix-seminar-announce-20190627.pdf</a></p> 
<p>【7月から開始するゼミのテーマ】<br/>このミートアップは、【自然言語処理】をテーマにしたゼミです。<br/>\u203bレコメンデーションのゼミ参加希望の方はご注意ください</p> <p>【スケジュール】<br/>7/27(土) 17:00~19:00(キックオフ)済み<br/>8/31(土) 17:00~19:00(第1回目対面ゼミ)済み<br/>9/28(土) 17:00~19:00(第2回目対面ゼミ)済み<br/>10/26(土) 17:00~19:00(第3回目対面ゼミ)</p> <p>【対象者】<br/>データミックスのアドバンスステップを修了されている方で以下の1~3のいずれかに該当する方<br/>1. 機械学習や統計学に関する学術論文を読んでみたいと思っているが、読み方がわからない。<br/>2. 一人で読んでも挫折しそう・・・<br/>3. 読んでて「面白い!」と思った部分を人と共有したい</p> <p>【次回読む論文】<br/><a href=\\"https://arxiv.org/abs/1810.04805\\" class=\\"linkified\\">https://arxiv.org/abs/1810.04805</a></p> <p>【ゲスト参加】<br/>もしご友人で興味がある方がいらっしゃいましたらぜひお誘いください<br/>\u203b在校生・卒業生1名につき2名まで</p> <p>【費用】<br/>各回1,000円(会場払)</p> ","visibility":"public","member_pay_fee":false}]
"""
| [
"ij4nu8d4fw@gmail.com"
] | ij4nu8d4fw@gmail.com |
0bf9f14a7d8f3b313cb14ebe38a4ae36709d9164 | 92237641f61e9b35ff6af6294153a75074757bec | /Algorithm/programmers/lv2/lv2_짝지어 제거하기.py | dc49c17ce25e718214f85eb4831fb672b343a239 | [] | no_license | taepd/study | 8ded115765c4f804813e255d9272b727bf41ec80 | 846d3f2a5a4100225b750f00f992a640e9287d9c | refs/heads/master | 2023-03-08T13:56:57.366577 | 2022-05-08T15:24:35 | 2022-05-08T15:24:35 | 245,838,600 | 0 | 1 | null | 2023-03-05T23:54:41 | 2020-03-08T15:25:15 | JavaScript | UTF-8 | Python | false | false | 278 | py | def solution(s):
stack = []
for e in s:
if not stack:
stack.append(e)
else:
if stack[-1] == e:
stack.pop()
else:
stack.append(e)
if stack:
return 0
else:
return 1 | [
"taepd1@gmail.com"
] | taepd1@gmail.com |
69b384952afa18b41fb769869d637c21f4a61bbb | 2075052d028ed31a30bdb9acb0a2022c2634f52b | /chat/consumers.py | 761dd8369a35c0e33e7d8ef65e1ce163904ade18 | [] | no_license | igoo-Y/live_chat_app | b67704caa2e5944b131a4299716e501b555985b5 | d65c87a35d3f3a120da35290addb798e412dad72 | refs/heads/main | 2023-06-30T13:21:49.860265 | 2021-08-03T09:11:29 | 2021-08-03T09:11:29 | 392,256,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | import json
from channels.generic.websocket import AsyncWebsocketConsumer
class ChatConsumer(AsyncWebsocketConsumer):
    """Relays chat messages between one WebSocket client and its room group."""

    async def connect(self):
        """Derive the group name from the URL route, join it, accept the socket."""
        self.room_name = self.scope["url_route"]["kwargs"]["room_name"]
        self.room_group_name = "chat_%s" % self.room_name
        await self.channel_layer.group_add(self.room_group_name, self.channel_name)
        await self.accept()

    async def disconnect(self, close_code):
        """Drop this channel from the room group when the socket closes."""
        await self.channel_layer.group_discard(self.room_group_name, self.channel_name)

    async def receive(self, text_data):
        """Forward a message arriving on the WebSocket to the whole room group."""
        payload = json.loads(text_data)
        # "type" names the handler method Channels will invoke on each consumer.
        event = {"type": "chat_message", "message": payload["message"]}
        await self.channel_layer.group_send(self.room_group_name, event)

    async def chat_message(self, event):
        """Handler for group 'chat_message' events: push the text down the socket."""
        await self.send(text_data=json.dumps({"message": event["message"]}))
| [
"79055280+igoo-Y@users.noreply.github.com"
] | 79055280+igoo-Y@users.noreply.github.com |
5ed6d98894bbff63047d401e8e20a8797425bf11 | 3ff5361ce05978b675483092ad33a14d7ed52c6f | /pipeline/bootstrap_pk.py | f4556387a6f7dd2c25b344d825adb017b8c0d11d | [] | no_license | bayu-wilson/lyb_pk2 | 9091010eeb1f9fd8325aad70cd259d3cb37d547b | 33520f5db248886be24950af0da23a20cbacdb95 | refs/heads/master | 2022-11-27T20:40:13.137323 | 2020-07-02T03:24:59 | 2020-07-02T03:24:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,492 | py | #!/usr/bin/env python
import numpy as np
import inis
import pandas as pd
import options as opt
from QuasarSpectrum import QuasarSpectrum
from scipy.interpolate import griddata
# --- Run configuration and spectrum loading -------------------------------
# `inis` holds run-time settings and `opt` the binning/forest constants;
# both are project modules not visible here.
tag = inis.tag
cat_name = inis.cat_name
rescale_flux = inis.rescale_flux
QuasarSpectrum.load_cat(cat_name)  # populate the class-level quasar catalog
nqso = QuasarSpectrum.nqso         # number of quasars in the loaded catalog

print("Loading Data")
qso_list = []
for i in range(nqso):
    q = QuasarSpectrum.load_qso_data(i,tag=tag,rescale_flux=rescale_flux)
    # For "noB" runs, regenerate mock forests for the Lyman-beta and the two
    # OVI doublet transitions (presumably to isolate/remove their absorption
    # contribution -- TODO confirm against QuasarSpectrum.get_new_forest).
    if "noB" in tag:
        q.get_new_forest(rescale_flux=rescale_flux,wrange =
                    (opt.lyb_min,opt.lyb_max,opt.lyb_rest, opt.xs_beta))
        q.get_new_forest(rescale_flux=rescale_flux,wrange =
                    (opt.ovi_min,opt.ovi_max,opt.ovi_rest_d1, opt.xs_ovi))
        q.get_new_forest(rescale_flux=rescale_flux,wrange =
                    (opt.ovi_min,opt.ovi_max,opt.ovi_rest_d2, opt.xs_ovi))
    qso_list.append(q)
print("Done!\n")
# --- Bootstrap set-up ------------------------------------------------------
# Fixed seed so the bootstrap resampling is reproducible between runs.
np.random.seed(1)
nz_arr = QuasarSpectrum.all_redshifts,QuasarSpectrum.all_names
M = inis.M # M Samples (number of bootstrap realizations)
# Column order of the per-redshift mean-flux table; the numeric comments in
# the loop below ("mf lya 0", "var tot 5", ...) refer to these indices.
mf_msrmnts = ['mf_a', 'nvar_a','dloglambda_a', 'npow_a',
'mf_tot', 'nvar_tot','dloglambda_tot', 'npow_tot','z','mf_b',
'var_atot','npow_atot']
n_mf_msrmnts = len(mf_msrmnts)
# Column order of the per-(z,k) power-spectrum table.
pk_msrmnts = ['k','Paa','Ptot','Pab','Qab','Pbb','num','z']
n_pk_msrmnts = len(pk_msrmnts)
# One mean-flux table and one flattened power-spectrum table per sample.
mf_bootstrap = np.zeros((M,opt.zbinlen,n_mf_msrmnts))
pk_bootstrap = np.zeros((M,opt.kbinlen*opt.zbinlen, n_pk_msrmnts))
# Paa_table = []
# Pbb_table = []
# Pab_table = []
# Qab_table = []
# --- Bootstrap loop --------------------------------------------------------
# Each iteration m draws nqso quasars WITH replacement and re-runs the full
# mean-flux + power-spectrum pipeline on the resampled set.  The index
# bookkeeping mirrors the main pipeline script; only comments are added here.
for m in range(M):
    opt.updt(M, m)  # progress indicator (project helper)
    # Resample quasar indices uniformly with replacement.
    qmask = np.floor(np.random.rand(nqso)*nqso).astype(int)
    qso_arr = np.array(qso_list)[qmask]
    # Per-zbin accumulator for mean-flux measurements (columns = mf_msrmnts).
    zbin_msr_matrix = np.zeros((opt.zbinlen,n_mf_msrmnts))
    # flux_pdf = [[] for tmp in range(opt.zbinlen)]
    # pdf_bins = np.arange(-0.025,1.05,.05)
    for zidx in range(opt.zbinlen):
        msrmnt_in_zbin = np.zeros(n_mf_msrmnts)  # running sums per column
        count_in_zbin = np.zeros(n_mf_msrmnts)   # pixel counts per column
        #opt.updt(opt.zbinlen, zidx)
        zbin_msrmnt = [[] for idx in range(n_mf_msrmnts)]  # NOTE(review): unused below
        for i in range(nqso):
            #i=85
            #zidx = 6
            # Lyman-alpha forest pixels of this (resampled) quasar in zbin zidx.
            zpix_a = qso_arr[i].get_zpix(opt.lya_rest)
            name = qso_arr[i].name
            mask = qso_arr[i].get_zmask(forest=(opt.lya_min,opt.lya_max,opt.lya_rest),
                zpix=zpix_a,zidx=zidx,zedges=opt.zbin_edges,name=name)
            #FLUX PDF
            #flux_pdf[zidx].append(np.histogram(qso_arr[i].flux[mask],bins=pdf_bins)[0])#/np.nansum(mask))
            # "Total" (Lyman-beta region) forest pixels in the same zbin.
            zpix_b = qso_arr[i].get_zpix(opt.lyb_rest) # Here is where I want to change optical depth of lyb pixels
            mask_b = qso_arr[i].get_zmask(forest=(opt.lyb_min,opt.lyb_max,opt.lyb_rest),
                zpix=zpix_b,zidx=zidx,zedges=opt.zbin_edges,name=name)
            za = zpix_a[mask]#qso_arr[i].wavelength[mask]/opt.lya_rest-1
            ztot = zpix_b[mask_b]#qso_arr[i].wavelength[mask_b]/opt.lyb_rest-1
            # Noise cross-term between the overlapping alpha and total forests:
            # restrict both to the common redshift range, then interpolate the
            # longer error array onto the shorter grid so they align pixelwise.
            # NOTE(review): the bare `except` silently skips quasars whose
            # forests do not overlap (np.min/np.max raise on empty arrays),
            # but it would also hide any other error here.
            try:
                new_af_mask = (za>np.min(ztot))&(za<np.max(ztot))
                new_bf_mask = (ztot>np.min(za))&(ztot<np.max(za))
                ferra = qso_arr[i].err_flux[mask][new_af_mask]
                ferrtot = qso_arr[i].err_flux[mask_b][new_bf_mask]
                # Interpolating to the smaller one
                if len(ferrtot)<=len(ferra):
                    ferra = griddata(za[new_af_mask],ferra,ztot[new_bf_mask],method='linear')
                    ferra = ferra[np.isfinite(ferra)]
                else:
                    ferrtot = griddata(ztot[new_bf_mask],ferrtot,za[new_af_mask],method='linear')
                    ferrtot = ferrtot[np.isfinite(ferrtot)]
                #print(np.nansum(ferra*ferrtot))
                # The `*0` deliberately zeroes this cross-variance term.
                msrmnt_in_zbin[10]+= np.sum(ferra*ferrtot)*0 #CHANGED 5/3/19 after meeting with Matt
                # var lya-tot 10
                count_in_zbin[10] += len(ferra) # len lya_tot 10
            except:
                pass
            # Accumulate sums and pixel counts; the trailing numbers match
            # the mf_msrmnts column indices.
            msrmnt_in_zbin[0]+= np.sum(qso_arr[i].flux[mask]) # mf lya 0
            count_in_zbin[0] += np.sum(mask) # len lya 0
            msrmnt_in_zbin[1]+= np.sum(qso_arr[i].err_flux[mask]**2) # var lya 1
            count_in_zbin[1] += np.sum(mask) # len lya 1
            msrmnt_in_zbin[2]+= np.sum(qso_arr[i].dloglambda[mask]) # dloglam lya 2
            count_in_zbin[2] += np.sum(mask) # len lya 2
            msrmnt_in_zbin[4]+= np.sum(qso_arr[i].flux[mask_b]) # mf tot 4
            count_in_zbin[4] += np.sum(mask_b) # len tot 4
            msrmnt_in_zbin[5]+= np.sum(qso_arr[i].err_flux[mask_b]**2) # var tot 5
            count_in_zbin[5] += np.sum(mask_b) # len tot 5
            msrmnt_in_zbin[6]+= np.sum(qso_arr[i].dloglambda[mask_b]) # dloglam tot 6
            count_in_zbin[6] += np.sum(mask_b) # len tot 6
        # Convert per-bin sums into means for this redshift bin.
        zbin_msr_matrix[zidx] = msrmnt_in_zbin/count_in_zbin
        #print(count_in_zbin[0])
        #print(msrmnt_in_zbin[0])
    #opt.updt(opt.zbinlen, opt.zbinlen)
    #print("Done!\n")
    # Derived columns (noise power etc.) computed across all zbins at once.
    # npow alpha 3
    zbin_msr_matrix.T[3] = list(QuasarSpectrum.get_npow(mf=zbin_msr_matrix.T[0],
        nvar=zbin_msr_matrix.T[1],
        dloglambda=zbin_msr_matrix.T[2]))
    # npow total 7
    zbin_msr_matrix.T[7] = list(QuasarSpectrum.get_npow(mf=zbin_msr_matrix.T[4],
        nvar=zbin_msr_matrix.T[5],
        dloglambda=zbin_msr_matrix.T[6]))
    # zbins 8
    zbin_msr_matrix.T[8] = opt.zbin_centers
    # npow lya-tot 11
    zbin_msr_matrix.T[11] = ((zbin_msr_matrix.T[4]*zbin_msr_matrix.T[0])**(-1)*
        zbin_msr_matrix.T[10] * np.pi / (opt.kmax-opt.kmin))
    #print('1',zbin_msr_matrix.T[10])
    mf_output_df = pd.DataFrame(zbin_msr_matrix)
    mf_output_df.columns = mf_msrmnts

    zab_centers = opt.find_za(opt.zbin_centers) #converting lyb zbins to the equivalent, lower, lya zbins
    len_zab = len(zab_centers)
    #Gives corresponding lya bin for each lyb bin. organized by increasing z.
    bin_zab=np.ones(len_zab)*np.nan
    for i in range(len_zab):
        for j in range(len_zab):
            if (zab_centers[i]>opt.zbin_edges[j])&(zab_centers[i]<opt.zbin_edges[j+1]):
                bin_zab[i] = (opt.zbin_centers[j])
    # Lyman-beta mean flux: ratio of the total-forest mean flux in a lyb bin
    # to the alpha-forest mean flux in its matched (lower-z) lya bin.
    mf_lyb = np.ones(len_zab)*np.nan #nan until proven otherwise
    for i in range(len_zab):
        if bin_zab[i] in mf_output_df.z.values:
            za_idx = mf_output_df.z == bin_zab[i]
            ztot_idx = i
            mf_lyb[i] = mf_output_df.mf_tot[ztot_idx]/mf_output_df.mf_a[za_idx]
    mf_output_df['mf_b'] = mf_lyb
    mf_bootstrap[m] = mf_output_df

    # POWER SPECTRUM
    # znk_matrix[z][measurement][k] holds per-zbin averaged power values;
    # measurement rows follow pk_msrmnts order.
    znk_matrix = np.zeros((opt.zbinlen,n_pk_msrmnts,opt.kbinlen)) # 7 zbins,6 measurements, 20 kbins
    #print("Pk")
    for zidx in range(opt.zbinlen):
        #opt.updt(opt.zbinlen, zidx)
        msrmnt_in_kbin = np.zeros((n_pk_msrmnts,opt.kbinlen))
        count_in_kbin = np.zeros((n_pk_msrmnts,opt.kbinlen))
        # Pre-fill the constant rows (k centers, current z) with count 1 so
        # the final division leaves them unchanged.
        msrmnt_in_kbin[0] = opt.kbin_centers
        count_in_kbin[0] = np.ones_like(opt.kbin_centers)
        count_in_kbin[6] = np.ones_like(opt.kbin_centers)
        msrmnt_in_kbin[7] = np.ones_like(opt.kbin_centers) * opt.zbin_centers[zidx]
        count_in_kbin[7] = np.ones_like(opt.kbin_centers)
        for qidx in range(nqso):
            # LYA FOREST: P ALPHA ALPHA
            zpix_a = qso_arr[qidx].get_zpix(opt.lya_rest)
            # NOTE(review): `name` here is left over from the mean-flux loop
            # above (the last quasar processed), not qso_arr[qidx].name --
            # confirm whether get_zmask actually uses it in this code path.
            zmask_a = qso_arr[qidx].get_zmask(forest=(opt.lya_min,opt.lya_max,opt.lya_rest),
                zpix=zpix_a,zidx=zidx,zedges=opt.zbin_edges,name=name)
            if np.sum(zmask_a)>opt.min_pix:
                kpix,pk = qso_arr[qidx].get_autopower(mf_output_df.mf_a[zidx],zmask_a)
                for kidx in range(opt.kbinlen):
                    npow = mf_output_df.npow_a.values[zidx]
                    kmask = qso_arr[qidx].get_kmask(kpix=kpix,kidx=kidx,kedges=opt.kbin_edges)
                    pk_sub = qso_arr[qidx].get_pk_subsets(kpix=kpix,pk=pk,zmask=zmask_a,kmask=kmask,
                        corr_tag=tag,npow=npow)
                    #znk_matrix[zidx][1,kidx] += np.sum(pk_sub)
                    #znk_matrix[zidx][6,kidx] += len(pk_sub)
                    msrmnt_in_kbin[1,kidx] += np.sum(pk_sub) #Paa
                    #msrmnt_in_kbin[6,kidx] += len(pk_sub)
                    count_in_kbin[1,kidx] += len(pk_sub) #num is Paa
            # LYB FOREST: P TOTAL TOTAL
            zpix_tot = qso_arr[qidx].get_zpix(opt.lyb_rest)
            zmask_tot = qso_arr[qidx].get_zmask(forest=(opt.lyb_min,opt.lyb_max,opt.lyb_rest),
                zpix=zpix_tot,zidx=zidx,zedges=opt.zbin_edges,name=name)
            if (np.sum(zmask_tot)>opt.min_pix):
                kpix,pk = qso_arr[qidx].get_autopower(mf_output_df.mf_tot[zidx],zmask_tot)
                for kidx in range(opt.kbinlen):
                    npow = mf_output_df.npow_tot.values[zidx]
                    kmask = qso_arr[qidx].get_kmask(kpix=kpix,kidx=kidx,kedges=opt.kbin_edges)
                    pk_sub = qso_arr[qidx].get_pk_subsets(kpix=kpix,pk=pk,zmask=zmask_tot,kmask=kmask,
                        corr_tag=tag,npow=npow)
                    msrmnt_in_kbin[2,kidx] += np.nansum(pk_sub)
                    count_in_kbin[2,kidx] += len(pk_sub)
            #Cross power
            # Requires both forests to have enough pixels in this zbin.
            if (np.sum(zmask_a)>opt.min_pix)&(np.sum(zmask_tot)>opt.min_pix):
                kpix,pab,qab,dlam,res = qso_arr[qidx].cross_pk_fft(mask_lya=zmask_a,mask_lyb=zmask_tot,
                    mf_lya=mf_output_df.mf_a[zidx],
                    mf_lyb=mf_output_df.mf_tot[zidx])
                npow = mf_output_df.npow_atot.values[zidx]
                for kidx in range(opt.kbinlen):
                    kmask = qso_arr[qidx].get_kmask(kpix=kpix,kidx=kidx,kedges=opt.kbin_edges)
                    pab_sub,qab_sub = qso_arr[qidx].get_xpk_subsets(kpix,pab,qab,dlam,res,tag,npow,kmask)
                    #msrmnt_in_kbin[2,kidx] += np.nansum(pk_sub)
                    #count_in_kbin[2,kidx] += len(pk_sub)
                    # msrmnt_in_kbin[3,kidx] += np.nansum(pab_sub) #remove!
                    #print(np.nansum(pab_sub))
                    msrmnt_in_kbin[3,kidx] += np.nansum(pab_sub)
                    count_in_kbin[3,kidx] += len(pab_sub)
                    msrmnt_in_kbin[4,kidx] += np.sum(qab_sub) #remove!
                    count_in_kbin[4,kidx] += len(qab_sub)
                    msrmnt_in_kbin[6,kidx] += len(pab_sub)
        # Average accumulated power over contributing modes in each k bin.
        znk_matrix[zidx] = msrmnt_in_kbin/count_in_kbin
    #opt.updt(opt.zbinlen, opt.zbinlen)
    #print("Done!\n")
    # Finding Lyman beta power
    # Pbb = Ptot (lyb bin) - Paa (matched lower-z lya bin).
    for i in range(len_zab):
        if bin_zab[i] in opt.zbin_centers:
            za_idx = np.where(opt.zbin_centers == bin_zab[i])[0][0]
            znk_matrix[i][5] = znk_matrix[i][2]-znk_matrix[za_idx][1]
    # Making 3d pk matrix into 2d pk data frame
    # NOTE(review): DataFrame.append was removed in pandas 2.0; this
    # requires an older pandas (pd.concat is the modern equivalent).
    x = pd.DataFrame(znk_matrix[0].T,columns=pk_msrmnts)
    for i in range(1,opt.zbinlen):
        x = x.append(pd.DataFrame(znk_matrix[i].T,columns=pk_msrmnts))
    #print(np.shape(x))
    pk_bootstrap[m] = x

# Flatten the (M, zbin, column) stacks for saving, one row per sample/column.
mf_bootstrap_2d = np.concatenate(mf_bootstrap.T)
pk_bootstrap_2d = np.concatenate(pk_bootstrap.T)
#np.reshape(mf_bootstrap.T,(M,opt.zbinlen*n_mf_msrmnts))
if inis.save_boot_mf:
    np.savetxt(inis.save_boot_mf_path,mf_bootstrap_2d)#).to_csv(inis.save_boot_mf_path, index=False)
if inis.save_boot_pk:
    np.savetxt(inis.save_boot_pk_path,pk_bootstrap_2d)
    # print(pk_bootstrap[m])
opt.updt(M, M)  # finalize the progress indicator
# PLOTTING ROUTINE
# import matplotlib.pyplot as plt
# qwer = mf_bootstrap.T[0]
# # Add a colorbar
# fig,ax = plt.subplots(1, 2,gridspec_kw={'width_ratios': [1,1]})
# fig.set_size_inches(12,8)
#
# im = ax[0].imshow(np.corrcoef(qwer), cmap = plt.cm.jet)
# fig.colorbar(im, ax=ax[0])
# # set the color limits - not necessary here, but good to know how.
# im.set_clim(0.0, 1.0)
# #plt.show()
#
# x = [np.var(qwer.T[:i].T) for i in range(1,M)]
# ax[1].plot(x)
# med = np.median(x)
# ax[1].set_ylim(med - med*0.01,med + med*0.01)
# fig.tight_layout()
# fig.savefig('../plot/figures/test.pdf')
# plt.clf()
# plt.show()
# print()
# qwer = np.reshape(np.concatenate(mf_bootstrap.T,axis=1)[0],(M,n_mf_msrmnts))
# print("HERE")
# print(qwer)
# print(qwer)
# print(np.concatenate(mf_bootstrap.T,axis=1)[0])
# qwer = np.reshape(np.concatenate(mf_bootstrap.T,axis=1)[0],(M,n_mf_msrmnts))
# # print(qwer)
# print(np.corrcoef(qwer))
# import matplotlib.pyplot as plt
# plt.imshow(np.corrcoef(qwer))
# plt.show()
# print()
# print(np.nanmean(mf_bootstrap.T,axis=2))
# print(np.nanvar(mf_bootstrap.T,axis=2))
# print(np.corrcoef(np.reshape(np.concatenate(mf_bootstrap,axis=1)[0],(M,n_mf_msrmnts))))
# print(np.mean(mf_bootstrap.T[1],axis=1))/
# for i in range(n_mf_msrmnts):
# #print(mf_bootstrap[0].T[i])
# print(np.nanmean(mf_bootstrap[0].T[i]))
# print(np.nanvar(mf_bootstrap[0].T[i]))
#
# print(np.nanmean(mf_bootstrap[0].T,axis=1))
# print('0')
# print(mf_bootstrap[0].T)
# print("1")
# print(mf_bootstrap[0].T[0])
# print("2")
# print(np.nanmean(mf_bootstrap[0].T[0]))
# print("3")
# print(np.nanmean(mf_bootstrap[0],axis=1))
# print("4")
| [
"bwilson3853@gmail.com"
] | bwilson3853@gmail.com |
aa41fbd83ac1923d6fda08de4cc8f3ebd55904e0 | 90390ddcc21d2f2c0dd5ee3c0e7a3d8d61be9638 | /wsgi/app/forms.py | 4141cbb7183fc430344eb1bf806ca44a244d8598 | [
"MIT"
] | permissive | pjamesjoyce/lcoptview_legacy | b27926e31c16f1fca07c6294e66d706fcb600682 | e0ebeb155d6f62d8619d33cf48db98bab8b7a4cd | refs/heads/master | 2021-07-16T11:38:58.451239 | 2017-09-26T10:43:50 | 2017-09-26T10:43:50 | 107,691,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | from flask_wtf import FlaskForm
from wtforms import TextField, PasswordField
from wtforms.validators import DataRequired
class LoginForm(FlaskForm):
    """Sign-in form accepting a username *or* an email, plus a password."""
    # Field declaration order fixes the rendering order in templates.
    # NOTE(review): wtforms.TextField is the deprecated pre-3.0 name of
    # StringField -- confirm the pinned WTForms version before upgrading.
    login_data = TextField('username or email', validators=[DataRequired()])
    password = PasswordField('password', validators=[DataRequired()])
class RegistrationForm(FlaskForm):
    """Account-creation form: username, email and a password typed twice.

    NOTE(review): no EqualTo validator ties password_repeat to password and
    no Email validator checks the email field -- presumably validated in the
    view; confirm before relying on form-level validation.
    """
    username = TextField('username', validators=[DataRequired()])
    email = TextField('email', validators=[DataRequired()])
    password = PasswordField('password', validators=[DataRequired()])
    password_repeat = PasswordField('repeat password', validators=[DataRequired()])
| [
"pjamesjoyce@gmail.com"
] | pjamesjoyce@gmail.com |
fc08528884d6626eaf34cd5a97c19cbd3a353798 | 24a9f5f1b8d8b550b9b70f5837e7de94ca76692c | /Lab07.py | 2ba74a2602492c526e417c46f3f8c003551fb5f7 | [] | no_license | syfiawoo/python-graphics-lab | 2548dfe45f9da50fa5f2578bf9489261c89b7681 | edd1fec368f28219aca2ebf9f709caaca53270e3 | refs/heads/master | 2021-01-20T19:00:26.232748 | 2012-07-03T10:17:08 | 2012-07-03T10:17:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | from graphics import *
# Add any functions or classes you might define here.
# Create the main window, 700 px wide by 500 px tall, using the graphics
# library pulled in above via `from graphics import *`.
win = GraphWin('Program Name', 700, 500)
# Add your code below this point.
# Enter the GUI event loop; blocks until the window is closed.
win.mainloop()
| [
"moi@fiawoo-PC.(none)"
] | moi@fiawoo-PC.(none) |
542448cbdd184491b07c34dbda1e18c8b39b173d | 8cdd03812d2cfad8fd9f9d5c83b38a59f9c9c8e8 | /Code/Backend/Test/stepper2.py | 7c6561f3f043ca1ceefb8becef5888d374680e6f | [] | no_license | MaenhoudtTom/project1 | 46d018ee1a9abd72f90c1f2124ad0a00bb0c46fa | ad050accf62aa629bef1d77922dc2207dd78df01 | refs/heads/master | 2022-11-07T23:14:31.559237 | 2020-06-24T07:07:09 | 2020-06-24T07:07:09 | 271,761,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | import RPi.GPIO as GPIO
import time
# Use Broadcom (BCM) pin numbering for all GPIO calls below.
GPIO.setmode(GPIO.BCM)
# The four driver inputs of the stepper controller, in coil order.
control_pins = [12, 16, 20, 21]
for pin in control_pins:
    GPIO.setup(pin, GPIO.OUT)
    GPIO.output(pin, 0)
# Eight-entry half-step sequence: each row is the on/off state of the four
# coils; stepping through the rows advances the rotor half a step at a time.
halfstep_seq = [
[1,0,0,0],
[1,1,0,0],
[0,1,0,0],
[0,1,1,0],
[0,0,1,0],
[0,0,1,1],
[0,0,0,1],
[1,0,0,1]
]
# 64 passes x 8 half-steps = 512 half-steps, with a 1 ms pause after each
# half-step. NOTE(review): how far 512 half-steps turns the shaft depends on
# the motor's gearing -- confirm against the hardware in use.
for i in range(64):
    for halfstep in range(8):
        for pin in range(4):
            GPIO.output(control_pins[pin], halfstep_seq[halfstep][pin])
        time.sleep(0.001)
GPIO.cleanup() | [
"55881682+MaenhoudtTom@users.noreply.github.com"
] | 55881682+MaenhoudtTom@users.noreply.github.com |
66329e2ef7a59a9819fbce4fe1a7b235e7eb2f9b | e90fdf3c083b43ebe5ead840190b81fd923e2798 | /Employee.py | 122dbf7023239dcd9345a05620e0749701118341 | [] | no_license | michaelgagliardi/2019_2020_projects | c643ba68aff6bf634a07c0e8863303eebe6defac | 217d32eaaea9a0f3a9bb5ff069f4e21492ec3b64 | refs/heads/main | 2023-04-03T03:20:19.293052 | 2021-04-05T21:01:36 | 2021-04-05T21:01:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | ######### to complete
class Employee:
    """Plain record describing one employee.

    Every constructor argument is stored verbatim on the instance. String
    fields default to the sentinel text "none" and numeric fields to 0,
    matching the data-loading conventions used elsewhere in this exercise.
    """

    def __init__(self, ID="none", name="none", age=0, exp=0, job="none", is_from_MA="none"):
        # Tuple-assign the six fields straight onto the instance.
        self.ID, self.name, self.age = ID, name, age
        self.exp, self.job, self.is_from_MA = exp, job, is_from_MA
| [
"noreply@github.com"
] | michaelgagliardi.noreply@github.com |
24fb58232485d43c8a84ef050f7eb2521e36f3dc | 811ecc8def16dc1bdb8f5a5c2c9e1d01b3f9c319 | /chat_bot/handlers/simple_qa.py | 54c7f4b080136b0e97702dec899b7b12a668befc | [] | no_license | jiaojianglong/MyBot | 7dd00959d6cf54d728b213b77084ce375b731ad3 | 9781b183cf168832b3c962d420e7f0a63287c4db | refs/heads/master | 2020-06-13T22:34:15.829604 | 2020-03-29T01:31:24 | 2020-03-29T01:31:24 | 194,809,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/2/22
# @Author : JiaoJianglong
from handlers.base.basehandler import QABaseHandler
from models.es.rg_search_question import RGSearchQuestion
from models.es.rg_question_answer import RGQuestionAnswer
class SimpleQAHandler(QABaseHandler):
    """HTTP handler answering a question via Elasticsearch retrieval.

    NOTE(review): the base-class helpers used below (init_parameter,
    process_flow, the threadpool decorator) are defined elsewhere; the
    descriptions here are inferred from usage -- confirm against
    QABaseHandler before relying on them.
    """
    # Class-level Elasticsearch model instances, shared across requests.
    question_model = RGSearchQuestion()
    answer_model = RGQuestionAnswer()
    @QABaseHandler.decorator.threadpool_decorator
    def post(self, *args, **kwargs):
        # Kept for its possible side effects; the value is overwritten below.
        result = self.init_parameter()
        content = self.get_argument("content")
        parameter = {"content":content}
        process_list = [
            self.process.QuestionSearchProcessor,  # question retrieval
        ]
        # Build the processing pipeline and run the parameter dict through it.
        process_instance = self.process_flow(process_list)
        result_parameter = process_instance.handle(parameter)
        result = result_parameter.get("result")
        return result
| [
"447151999@qq.com"
] | 447151999@qq.com |
67a41c24e4933a1aeb0fd35e31eb01bda1c58d33 | d2b0a5d0702162f32fa277103a2962a9b34b3850 | /demo/demo_special_purpose_publication_occupancy.py | 9fd9fcc768d9047027291e00faf328dd7c0f13c4 | [] | no_license | ruoxijia/pad | e0835b5ce39de1ce8c0a85d8387cebfbe0248c79 | 7a8c4710c82c2f45454458dd992a88ea8fde82b1 | refs/heads/master | 2021-10-10T17:28:59.560879 | 2017-11-02T06:28:21 | 2017-11-02T06:28:21 | 109,215,905 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,685 | py | from helper import Utilities, PerformanceEvaluation
import pandas as pd
from metric_learning import Subsampling, MetricLearning
from user_feedback import Similarity
from scipy.misc import comb
"""
In the demo, we will showcase an example of special purpose publication.
The data user wants the published database to maximally retain the information about lunch time.
"""
# Initialization of some useful classes
util = Utilities()
pe = PerformanceEvaluation()
mel = MetricLearning()
# step 1: get the database to be published
day_profile = pd.read_pickle('../dataset/dataframe_all_binary.pkl')
# Downsample: every 4th row, one column per 60 entries.
day_profile = day_profile.iloc[0::4,0::60]
rep_mode = 'mean'
anonymity_level = 2 # desired anonymity level
# step 2: data user specifies his/her interest. In the example, the data user is interested in preserving the
# information of a segment of entire time series. In this case, he/she would also need to specify the starting and
# ending time of the time series segment of interest.
interest = 'segment'
window = [11,15] # window specifies the starting and ending time of the period that the data user is interested in
# step 3: pre-sanitize the database
sanitized_profile_baseline = util.sanitize_data(day_profile, distance_metric='euclidean',
                                                anonymity_level=anonymity_level,rep_mode = rep_mode)
loss_generic_metric = pe.get_information_loss(data_gt=day_profile,
                                              data_sanitized=sanitized_profile_baseline.round(),
                                              window=window)
print("information loss with generic metric %s" % loss_generic_metric)
df_subsampled_from = sanitized_profile_baseline.drop_duplicates().sample(frac=1)
# NOTE(review): scipy.misc.comb was removed in SciPy 1.0 (now
# scipy.special.comb) -- confirm the pinned SciPy version.
subsample_size_max = int(comb(len(df_subsampled_from),2))
print('total number of pairs is %s' % subsample_size_max)
# step 4: sample a subset of pre-sanitized database and form the data points into pairs
subsample_size = int(round(subsample_size_max))
sp = Subsampling(data=df_subsampled_from)
data_pair = sp.uniform_sampling(subsample_size=subsample_size)
# User receives the data pairs and label the similarity
sim = Similarity(data=data_pair)
sim.extract_interested_attribute(interest=interest, window=window)
similarity_label, class_label = sim.label_via_silhouette_analysis(range_n_clusters=range(2,8))
# step 5: PAD learns a distance metric that represents the interest of the user from the labeled data pairs
# lam_vec is a set of candidate lambda's for weighting the l1-norm penalty in the metric learning optimization problem.
# The lambda that achieves lowest testing error will be selected for generating the distance metric
# ('simialrity' below is spelled exactly as in the metric_learning API.)
dist_metric = mel.learn_with_simialrity_label_regularization(data=data_pair,
                                                             label=similarity_label,
                                                             lam_vec=[0,0.1,1,10],
                                                             train_portion=0.8)
# step 6: the original database is privatized using the learned metric
sanitized_profile = util.sanitize_data(day_profile, distance_metric="mahalanobis",
                                       anonymity_level=anonymity_level, rep_mode=rep_mode, VI=dist_metric)
# (optionally for evaluation purpose) Evaluating the information loss of the sanitized database
loss_learned_metric = pe.get_information_loss(data_gt=day_profile,
                                              data_sanitized=sanitized_profile.round(),
                                              window=window)
print("sampled size %s" % subsample_size)
print("information loss with learned metric %s" % loss_learned_metric)
| [
"ruoxijia@berkeley.edu"
] | ruoxijia@berkeley.edu |
63acff57f79dae33fea912cb9cec6449292df6f5 | b3717c1c9eb195c0d6205a653f97cb719118c166 | /blog/models.py | 744ae9bd29bad66ab8edd786bab55a1069e098a9 | [] | no_license | Harithmech/my-first-blog | c551323a295da0682e56067af900731e1af43fd6 | bccf0a258ba1e361fa8c5ec5f00c5cc9b9498bbe | refs/heads/master | 2023-02-28T16:01:50.138599 | 2021-02-11T11:45:21 | 2021-02-11T11:45:21 | 266,046,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | from django.db import models
# Create your models here.
from django.conf import settings
from django.utils import timezone
class Post(models.Model):
    """A blog post with a draft -> published lifecycle."""
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    # timezone.now is passed uncalled, so it is evaluated per row at save
    # time rather than once at import.
    created_date = models.DateTimeField(default=timezone.now)
    # Null while the post is still a draft; set by publish().
    published_date = models.DateTimeField(blank=True, null=True)
    def publish(self):
        """Stamp the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
| [
"harithmech@gmail.com"
] | harithmech@gmail.com |
e262beb5a4a594f5ccd34d82a63f92f441f87e62 | 786550172250f9f9b14bd923151efc4759349c61 | /dlkit/relationship/license.py | 6abeebe8829ffb7bb0c4f68c4f65d28aaaa9cab3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mitsei/dlkit-doc | 6d88c1686fa9395047b43028b41cd4315c5bacc4 | b320d57e91bfb32fd88e1fce01d3ddb5935aa9dd | refs/heads/master | 2021-01-14T12:47:42.203154 | 2015-05-11T02:47:03 | 2015-05-11T02:47:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,776 | py | # -*- coding: utf-8 -*-
"""Relationship Open Service Interface Definitions
relationship version 3.0.0
Copyright (c) 2010 Ingenescus. All Rights Reserved.
This Work is being provided by the copyright holder(s) subject to the
following license. By obtaining, using and/or copying this Work, you
agree that you have read, understand, and will comply with the following
terms and conditions.
Permission to use, copy and distribute unmodified versions of this Work,
for any purpose, without fee or royalty is hereby granted, provided that
you include the above copyright notices and the terms of this license on
ALL copies of the Work or portions thereof.
You may modify or create Derivatives of this Work only for your internal
purposes. You shall not distribute or transfer any such Derivative of
this Work to any location or to any third party. For the purposes of
this license, "Derivative" shall mean any derivative of the Work as
defined in the United States Copyright Act of 1976, such as a
translation or modification.
This Work and the information contained herein is provided on an "AS IS"
basis WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS IN THE WORK.
The export of software employing encryption technology may require a
specific license from the United States Government. It is the
responsibility of any person or organization contemplating export to
obtain such a license before exporting this Work.
""" | [
"birdland@Jeff-PM13.local"
] | birdland@Jeff-PM13.local |
c7538c4194854b3c7982b74de054fe7f78f8728e | 1d70ee049c5198b75567e0000c45ef879f6d39be | /JobMatchPonos/server/modules/cv/cv_api.py | 25e0c70dc1df46469890788a794b2b58526bf6f8 | [] | no_license | alexawl/Job-Match-Ponos-Back | 95f28185f71c38733973bc6d730947455c2e6c93 | c48b4bfddfbf2f4f5aa95409fd2c6ee4f469d9dd | refs/heads/master | 2022-09-30T15:07:19.875016 | 2020-02-01T06:50:34 | 2020-02-01T06:50:34 | 237,575,072 | 0 | 0 | null | 2021-05-06T20:07:41 | 2020-02-01T06:49:35 | Python | UTF-8 | Python | false | false | 463 | py | from flask_restful import Resource
from flask import request, session
from mongoengine import NotUniqueError
import json
from mongoengine import connect
from jobmatcher.server.authentication.authentication import require_authentication
from jobmatcher.server.authentication.web_token import generate_access_token
from jobmatcher.server.modules.cv import cv_schemas
from jobmatcher.server.utils import utils as u
from jobmatcher.server.modules.cv.CV import CV
| [
"alexawl@bellsouth.net"
] | alexawl@bellsouth.net |
a8271bda23a6157bb2fe3136fe4736b8542b2437 | 1bc350d7a026763ace5d007958e093cf2eeada93 | /cos_distance.py | 66dcb2fa6cd48fde2a06f125d88c7a26f2718323 | [] | no_license | yuridadt/recommender_systems_lab | 2d6abb32e4f7cdd46816fbfcac97903a3b02a9f8 | 15025a1949748a65ef4eb1836dbdd643aebec805 | refs/heads/master | 2020-12-09T17:40:23.484989 | 2020-01-16T20:34:15 | 2020-01-16T20:34:15 | 233,372,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,198 | py | #Рассчитываем косинусное расстояние для кр
#https://habr.com/ru/post/150399/
import csv
import math
#обходим tuple unpacking in python 3
def star(f):
    """Adapt *f* so it can be used as a one-argument key function.

    The returned callable accepts a single tuple and splats it into *f*,
    working around the removal of tuple-parameter unpacking in Python 3.
    """
    def unpacked(packed):
        return f(*packed)
    return unpacked
def ReadFile(filename="cos.csv"):
    """Load ratings from *filename* into a nested dict.

    The CSV is expected to contain rows of the form ``user,product,rate``.
    Returns ``{user: {product: rate}}`` with rates converted to float.

    Fix: the file is now opened with a context manager so the handle is
    closed even if a malformed row raises (the original leaked it then).
    """
    mentions = dict()
    # newline="" is the documented way to open files for the csv module.
    with open(filename, newline="") as f:
        for row in csv.reader(f):
            user, product, rate = row[0], row[1], float(row[2])
            mentions.setdefault(user, dict())[product] = rate
    return mentions
def distCosine(vecA, vecB):
    """Cosine similarity of two sparse vectors stored as dicts.

    Only dimensions present in both dicts contribute to the dot product;
    each vector's own keys define its norm. Like the original, this raises
    ZeroDivisionError if either vector has zero norm.
    """
    def dot(u, v):
        return sum(u[dim] * v[dim] for dim in u if dim in v)
    return dot(vecA, vecB) / math.sqrt(dot(vecA, vecA)) / math.sqrt(dot(vecB, vecB))
def makeRecommendation (userID, userRates, nBestUsers, nBestProducts):
    """User-based collaborative filtering for *userID*.

    *userRates* maps user -> {product: rate} (see ReadFile). The
    nBestUsers most cosine-similar users vote, weighted by similarity, on
    products the target user has not rated; the top nBestProducts are
    printed and returned as (product, score) pairs.

    NOTE(review): if every neighbour similarity sums to 0, sim_all is 0
    and the normalisation loop below raises ZeroDivisionError.
    """
    # Cosine similarity between the target user and every other user.
    matches = [(u, distCosine(userRates[userID], userRates[u])) for u in userRates if u != userID]
    # Sort by (similarity, user) descending; star() unpacks each pair for the key.
    bestMatches = sorted(matches, key=star(lambda x,y: (y ,x)), reverse=True)[:nBestUsers]
    print ("Most correlated with '%s' users:" % userID)
    for line in bestMatches:
        print (" UserID: %6s Coeff: %6.4f" % (line[0], line[1]))
    sim = dict()
    sim_all = sum([x[1] for x in bestMatches])
    # Keep only positively correlated neighbours for the weighted vote.
    bestMatches = dict([x for x in bestMatches if x[1] > 0.0])
    for relatedUser in bestMatches:
        for product in userRates[relatedUser]:
            # Only score products the target user has not rated yet.
            if not product in userRates[userID]:
                if not product in sim:
                    sim[product] = 0.0
                sim[product] += userRates[relatedUser][product] * bestMatches[relatedUser]
    # Normalise by the total similarity mass of the selected neighbours.
    for product in sim:
        sim[product] /= sim_all
    bestProducts = sorted(sim.items(), key=star(lambda x,y: (y,x)), reverse=True)[:nBestProducts]
    print ("Most correlated products:")
    for prodInfo in bestProducts:
        print (" ProductID: %6s CorrelationCoeff: %6.4f" % (prodInfo[0], prodInfo[1]))
    return [(x[0], x[1]) for x in bestProducts]
if __name__ == '__main__':
rec = makeRecommendation('ivan', ReadFile(), 5, 5)
print ('...end of calculations...') | [
"48838615+yuridadt@users.noreply.github.com"
] | 48838615+yuridadt@users.noreply.github.com |
766305b90df2cd606aef9971c1c2f21fb0412565 | fc557979eb04d242f892303dde54b47616a2f74b | /blogProject/website/models.py | 1af510c90ecfbd423549a1ee5b579993d5366434 | [] | no_license | supr-pr/blogProject | 99ad3ee03b004c56bc44551847bf3e123ab6a7cf | b77775f05f8b0929f854794ca2a0c68f089bc7b8 | refs/heads/master | 2021-01-02T09:19:02.479888 | 2017-08-21T10:11:23 | 2017-08-21T10:11:23 | 99,191,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,830 | py | from django.db import models
from django.forms import ModelForm
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
# from comments.models import Comment
# @property
# def commens(self):
# qs = Comment.objects.filter_by_instance(instance)
# return qs
class Category(models.Model):
    """A post category; defaults to the generic 'Blog' bucket."""
    category = models.CharField(max_length=100, default='Blog')
    def __str__ (self):
        return self.category
class Author(models.Model):
    """A post/comment author, identified only by a display name."""
    author = models.CharField(max_length=100, default='Anonymous')
    def __str__(self):
        return self.author
# class Cmnt(models.Model):
# title = models.CharField(max_length=100)
# date = models.DateTimeField()
# user = models.ForeignKey(Author)
# user = models.ForeignKey(settings.AUTH_USER_MODEL, default=1)
# content_type = models.ForeignKey(ContenType, on_delete= models.CASCADE)
# object_id = models.PositiveIntegerField()
# comment_object = GenericForeignKey('content_type', 'object_id')
# content = models.TextField()
# timestamp = models.DateTimeField(auto_now_add=True)
# objects = CommentManager()
# # def get_fields_and_values(self):
# # return [(field, field.value_to_string(self)) for field in Post._meta.fields]
# def __str__ (self):
# return str(self.user.username)
# # return self.author
# class CommentManager(models.Manager):
# def filter_by_instance(self,instance):
# content_type = ContenType.objects.get_for_model(instance, __class__)
# obj_id = instance.obj_id
# # comments = Comment.objects.filter(content_type=content_type, obj_id= obj_id)
# qs = super (CommentManager, self).filter(content_type=content_type, object_id=obj_id)
# # comments = Comment.objects.filter(content_type=content_type, object_id=obj_id)
# return qs
class Post(models.Model):
    """A blog entry tied to an Author and a Category."""
    title = models.CharField(max_length=100)
    body = models.TextField()
    date = models.DateTimeField()
    # NOTE(review): ForeignKey without on_delete only works on Django < 2.0;
    # newer versions require an explicit on_delete argument.
    author = models.ForeignKey(Author)
    category = models.ForeignKey(Category)
    # cmnt = models.ForeignKey(Cmnt)
    # def get_fields_and_values(self):
    #     return [(field, field.value_to_string(self)) for field in Post._meta.fields]
    def __str__ (self):
        return self.title
class Comnt(models.Model):
    """A comment attached to a Post.

    NOTE(review): date, updated and my_date_field are three overlapping
    auto-timestamps; my_date_field duplicates updated and looks removable,
    but dropping it would require a schema migration.
    """
    content = models.TextField(default='')
    # date = models.DateTimeField(auto_now=True, auto_now_add=False)
    date = models.DateTimeField(auto_now_add=True)
    author = models.ForeignKey(Author)
    comnt_on = models.ForeignKey(Post)
    updated = models.DateTimeField(auto_now=True)
    my_date_field = models.DateTimeField(auto_now=True, auto_now_add=False)
    def __str__ (self):
        return self.content
    @property
    def title(self):
        # Alias of content -- presumably for callers expecting a .title;
        # confirm against the templates before removing.
        return self.content
class ComntForm(ModelForm):
    """ModelForm for submitting a Comnt: content, author and target post."""
    class Meta:
        model = Comnt
        fields = [
            'content',
            'author',
            # 'email2'
            'comnt_on'
] | [
"supr.pr@gmail.com"
] | supr.pr@gmail.com |
cfd05d15243758d9c263176fe03fefdad10205cd | e7287d286d127311118d2689bd97b513b95ae046 | /ext/exmpSorted.py | d5da2e587885bca0af4ae73ba5e6eae3ed0a88ce | [] | no_license | Reihan-amn/DiseaseClassificationProlem_Kaggle | 3da32457d45165d2cb6549054b72203604e12481 | 9f1407e33a910a37af3eced2c5ddebdf4d3f7530 | refs/heads/master | 2021-05-11T00:49:05.955718 | 2018-01-21T07:14:18 | 2018-01-21T07:14:18 | 118,311,674 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | from ext import exmpDataFact
# Load the CSV into a data matrix via the project's data-factory helper.
Bclass = exmpDataFact.getDataMatrixFromCSV("./DeathRecords.csv")
print('type: ', type(Bclass),' shape: ',Bclass.shape)
''''first row deleted '''
# columns_name = np.array(data2DMat[0])
# features_label = np.delete(columns_name, [0,24], axis = None)
# data2DMat = np.delete(data2DMat, (0), axis=0)
#
#
# '''class labelizing'''
# clss = data2DMat[:, 24]
# print("class lenght:" ,len(clss))
# clssa = exmpDataFact.classDiscreteToInteger(clss)
# classes = clssa[:,None] | [
"reihan@Reihanehs-MacBook-Pro.local"
] | reihan@Reihanehs-MacBook-Pro.local |
3823340ea644b2feec0858721dad3a7c2d67d330 | 1b597dd7630f9a3023faf557e383b0fae703e72b | /test_autogalaxy/unit/aggregator/test_aggregator.py | 40b7acd97191da8084e06012b80ef34395849c57 | [
"MIT"
] | permissive | knut0815/PyAutoGalaxy | 96e9dfc558182169c41e19d3297cdf46b42d5f77 | cc2bc0db5080a278ba7519f94d2a8b2468141e2d | refs/heads/master | 2023-03-05T00:59:51.594715 | 2021-02-09T18:21:30 | 2021-02-09T18:21:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,428 | py | from os import path
import pytest
import autofit as af
import autogalaxy as ag
from autogalaxy.mock import mock
directory = path.dirname(path.realpath(__file__))
@pytest.fixture(name="path")
def make_path():
    # Absolute path of the `files` directory next to this test module.
    return path.join("{}".format(path.dirname(path.realpath(__file__))), "files")
@pytest.fixture(name="samples")
def make_samples():
    # Two-galaxy plane (redshifts 0.5 and 1.0; the first has an offset
    # centre) wrapped in a mock samples object whose maximum-likelihood
    # instance is that plane -- the tests below assert these exact values.
    galaxy_0 = ag.Galaxy(redshift=0.5, light=ag.lp.EllipticalSersic(centre=(0.0, 1.0)))
    galaxy_1 = ag.Galaxy(redshift=1.0, light=ag.lp.EllipticalSersic())
    plane = ag.Plane(galaxies=[galaxy_0, galaxy_1])
    return mock.MockSamples(max_log_likelihood_instance=plane)
def test__dataset_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
    # NOTE(review): this test only prints the aggregated datasets -- it
    # asserts nothing, so it can fail only by raising.
    phase_imaging_7x7 = ag.PhaseImaging(
        galaxies=dict(
            galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
            source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
        ),
        search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
    )
    imaging_7x7.positions = ag.Grid2DIrregular([[1.0, 1.0], [2.0, 2.0]])
    phase_imaging_7x7.run(
        dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
    )
    agg = af.Aggregator(directory=phase_imaging_7x7.paths.output_path)
    dataset = list(agg.values("dataset"))
    print(dataset)
def test__plane_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
    # Run a phase, then check the aggregator reconstructs planes carrying
    # the redshifts and centre baked into the `samples` fixture above.
    phase_imaging_7x7 = ag.PhaseImaging(
        galaxies=dict(
            galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
            source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
        ),
        search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
    )
    phase_imaging_7x7.run(
        dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
    )
    agg = af.Aggregator(directory=phase_imaging_7x7.paths.output_path)
    plane_gen = ag.agg.Plane(aggregator=agg)
    for plane in plane_gen:
        assert plane.galaxies[0].redshift == 0.5
        assert plane.galaxies[0].light.centre == (0.0, 1.0)
        assert plane.galaxies[1].redshift == 1.0
def test__masked_imaging_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
    # The grid settings below must round-trip through the aggregator: the
    # reconstructed masked imaging is checked against them at the bottom.
    phase_imaging_7x7 = ag.PhaseImaging(
        galaxies=dict(
            galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
            source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
        ),
        settings=ag.SettingsPhaseImaging(
            settings_masked_imaging=ag.SettingsMaskedImaging(
                grid_class=ag.Grid2DIterate,
                grid_inversion_class=ag.Grid2DIterate,
                fractional_accuracy=0.5,
                sub_steps=[2],
            )
        ),
        search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
    )
    phase_imaging_7x7.run(
        dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
    )
    agg = af.Aggregator(directory=phase_imaging_7x7.paths.output_path)
    masked_imaging_gen = ag.agg.MaskedImaging(aggregator=agg)
    for masked_imaging in masked_imaging_gen:
        assert (masked_imaging.imaging.image == imaging_7x7.image).all()
        assert isinstance(masked_imaging.grid, ag.Grid2DIterate)
        assert isinstance(masked_imaging.grid_inversion, ag.Grid2DIterate)
        assert masked_imaging.grid.sub_steps == [2]
        assert masked_imaging.grid.fractional_accuracy == 0.5
def test__fit_imaging_generator_from_aggregator(imaging_7x7, mask_7x7, samples):
    # Smoke-check: fits rebuilt from the aggregator wrap the same image data.
    phase_imaging_7x7 = ag.PhaseImaging(
        galaxies=dict(
            galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
            source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
        ),
        search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
    )
    phase_imaging_7x7.run(
        dataset=imaging_7x7, mask=mask_7x7, results=mock.MockResults(samples=samples)
    )
    agg = af.Aggregator(directory=phase_imaging_7x7.paths.output_path)
    fit_imaging_gen = ag.agg.FitImaging(aggregator=agg)
    for fit_imaging in fit_imaging_gen:
        assert (fit_imaging.masked_imaging.imaging.image == imaging_7x7.image).all()
def test__masked_interferometer_generator_from_aggregator(
    interferometer_7, visibilities_mask_7, mask_7x7, samples
):
    # Interferometer analogue of the masked-imaging test above: every grid
    # and transformer setting must survive the aggregator round-trip.
    phase_interferometer_7x7 = ag.PhaseInterferometer(
        galaxies=dict(
            galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
            source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
        ),
        settings=ag.SettingsPhaseInterferometer(
            settings_masked_interferometer=ag.SettingsMaskedInterferometer(
                grid_class=ag.Grid2DIterate,
                grid_inversion_class=ag.Grid2DIterate,
                fractional_accuracy=0.5,
                sub_steps=[2],
                transformer_class=ag.TransformerDFT,
            )
        ),
        search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
        real_space_mask=mask_7x7,
    )
    phase_interferometer_7x7.run(
        dataset=interferometer_7,
        mask=visibilities_mask_7,
        results=mock.MockResults(samples=samples),
    )
    agg = af.Aggregator(directory=phase_interferometer_7x7.paths.output_path)
    masked_interferometer_gen = ag.agg.MaskedInterferometer(aggregator=agg)
    for masked_interferometer in masked_interferometer_gen:
        assert (
            masked_interferometer.interferometer.visibilities
            == interferometer_7.visibilities
        ).all()
        assert (masked_interferometer.real_space_mask == mask_7x7).all()
        assert isinstance(masked_interferometer.grid, ag.Grid2DIterate)
        assert isinstance(masked_interferometer.grid_inversion, ag.Grid2DIterate)
        assert masked_interferometer.grid.sub_steps == [2]
        assert masked_interferometer.grid.fractional_accuracy == 0.5
        assert isinstance(masked_interferometer.transformer, ag.TransformerDFT)
def test__fit_interferometer_generator_from_aggregator(
    interferometer_7, visibilities_mask_7, mask_7x7, samples
):
    # Smoke-check: fits rebuilt from the aggregator wrap the same
    # visibilities and real-space mask that went into the phase.
    phase_interferometer_7x7 = ag.PhaseInterferometer(
        galaxies=dict(
            galaxy=ag.GalaxyModel(redshift=0.5, light=ag.lp.EllipticalSersic),
            source=ag.GalaxyModel(redshift=1.0, light=ag.lp.EllipticalSersic),
        ),
        search=mock.MockSearch(samples=samples, name="test_phase_aggregator"),
        real_space_mask=mask_7x7,
    )
    phase_interferometer_7x7.run(
        dataset=interferometer_7,
        mask=visibilities_mask_7,
        results=mock.MockResults(samples=samples),
    )
    agg = af.Aggregator(directory=phase_interferometer_7x7.paths.output_path)
    fit_interferometer_gen = ag.agg.FitInterferometer(aggregator=agg)
    for fit_interferometer in fit_interferometer_gen:
        assert (
            fit_interferometer.masked_interferometer.interferometer.visibilities
            == interferometer_7.visibilities
        ).all()
        assert (
            fit_interferometer.masked_interferometer.real_space_mask == mask_7x7
        ).all()
| [
"james.w.nightingale@durham.ac.uk"
] | james.w.nightingale@durham.ac.uk |
282283fc9217210a557858c3ef384120074f0fe2 | 5424af93e136131ec74efdb307181d3f0c173a02 | /test/test2.py | 7139c731fc4268a201b8ee4a5f6f45cc90c43d76 | [] | no_license | koy1619/python_test | 8e70021a73d53d9192f6ace2ec346e5e3a92919f | c2315ccb2e66a111ed66b28bb19c7abafaa5c8b7 | refs/heads/master | 2021-01-15T21:50:20.460932 | 2014-06-23T07:37:31 | 2014-06-23T07:37:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#5-5 取余。取一个任意小于1 美元的金额,然后计算可以换成最少多少枚硬币。硬币有1
#美分,5 美分,10 美分,25 美分四种。1 美元等于100 美分。举例来说,0.76 美元换算结果
#应该是 3 枚25 美分,1 枚1 美分。类似76 枚1 美分,2 枚25 美分+2 枚10 美分+1 枚5 美分+1
#枚1 美分这样的结果都是不符合要求的。
# Read a dollar amount (expected < 1 USD) as text, echo it, then convert it
# to an integer number of cents. Python 2 source: raw_input, print
# statements, and `/` on ints is floor division.
money = raw_input('输入任意小于1 美元的金额:')
print money,'美元换算结果'
money = float(money)
money *= 100
# NOTE(review): plain int() truncation can lose a cent to float error
# (e.g. 0.29 * 100 -> 28.999...); int(round(...)) would be safer.
money = int(money)
# Greedy change-making: quarters, then dimes, nickels, pennies.
cent25 = money / 25
money %= 25
cent10 = money / 10
money %= 10
cent5 = money / 5
money %= 5
cent1 = money
# Print only the denominations actually used.
if cent25 :
    print '25美分*',cent25
if cent10 :
    print '10美分*',cent10
if cent5 :
    print '5美分*',cent5
if cent1 :
    print '1美分*',cent1
| [
"maxiaolei007@sina.com"
] | maxiaolei007@sina.com |
b708ef0ba29cc97092ba45507823ff4dd82a5350 | 97062249c6eb04069c6fb01e71d06bc334c828e1 | /desktop/core/ext-py/Django-1.6.10/tests/decorators/tests.py | 05016be231c1703dbabc6a7a8f688f91e33ceaf2 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | Albertsss/hue | 1c8b31c64cc420a029f5b5b80712fb3d0c6cbd6e | 454d320dd09b6f7946f3cc05bc97c3e2ca6cd485 | refs/heads/master | 2021-07-08T17:21:13.237871 | 2018-05-30T06:03:21 | 2018-05-30T06:03:21 | 135,386,450 | 0 | 1 | Apache-2.0 | 2020-07-25T13:36:58 | 2018-05-30T04:06:18 | Python | UTF-8 | Python | false | false | 8,243 | py | from functools import wraps
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required, permission_required, user_passes_test
from django.http import HttpResponse, HttpRequest, HttpResponseNotAllowed
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.utils.decorators import method_decorator
from django.utils.functional import allow_lazy, lazy, memoize
from django.utils.unittest import TestCase
from django.views.decorators.cache import cache_page, never_cache, cache_control
from django.views.decorators.clickjacking import xframe_options_deny, xframe_options_sameorigin, xframe_options_exempt
from django.views.decorators.http import require_http_methods, require_GET, require_POST, require_safe, condition
from django.views.decorators.vary import vary_on_headers, vary_on_cookie
def fully_decorated(request):
    """Expected __doc__"""
    # The literal docstring above and the attribute below are asserted
    # verbatim by DecoratorsTest.test_attributes -- do not reword them.
    return HttpResponse('<html><body>dummy</body></html>')
fully_decorated.anything = "Expected __dict__"
def compose(*functions):
    """Right-to-left function composition.

    ``compose(f, g)(*args, **kwargs)`` equals ``f(g(*args, **kwargs))``:
    the last function receives the call arguments and every earlier one
    receives the previous result.
    """
    pipeline = functions[::-1]
    def _inner(*args, **kwargs):
        value = pipeline[0](*args, **kwargs)
        for fn in pipeline[1:]:
            value = fn(value)
        return value
    return _inner
# Stack (nearly) every decorator Django ships onto one view; the tests then
# check that metadata such as __name__/__doc__ survives the whole stack.
full_decorator = compose(
    # django.views.decorators.http
    require_http_methods(["GET"]),
    require_GET,
    require_POST,
    require_safe,
    condition(lambda r: None, lambda r: None),
    # django.views.decorators.vary
    vary_on_headers('Accept-language'),
    vary_on_cookie,
    # django.views.decorators.cache
    cache_page(60*15),
    cache_control(private=True),
    never_cache,
    # django.contrib.auth.decorators
    # Apply user_passes_test twice to check #9474
    user_passes_test(lambda u:True),
    login_required,
    permission_required('change_world'),
    # django.contrib.admin.views.decorators
    staff_member_required,
    # django.utils.functional
    lambda f: memoize(f, {}, 1),
    allow_lazy,
    lazy,
)
fully_decorated = full_decorator(fully_decorated)
class DecoratorsTest(TestCase):
def test_attributes(self):
"""
Tests that django decorators set certain attributes of the wrapped
function.
"""
self.assertEqual(fully_decorated.__name__, 'fully_decorated')
self.assertEqual(fully_decorated.__doc__, 'Expected __doc__')
self.assertEqual(fully_decorated.__dict__['anything'], 'Expected __dict__')
def test_user_passes_test_composition(self):
"""
Test that the user_passes_test decorator can be applied multiple times
(#9474).
"""
def test1(user):
user.decorators_applied.append('test1')
return True
def test2(user):
user.decorators_applied.append('test2')
return True
def callback(request):
return request.user.decorators_applied
callback = user_passes_test(test1)(callback)
callback = user_passes_test(test2)(callback)
class DummyUser(object): pass
class DummyRequest(object): pass
request = DummyRequest()
request.user = DummyUser()
request.user.decorators_applied = []
response = callback(request)
self.assertEqual(response, ['test2', 'test1'])
def test_cache_page_new_style(self):
"""
Test that we can call cache_page the new way
"""
def my_view(request):
return "response"
my_view_cached = cache_page(123)(my_view)
self.assertEqual(my_view_cached(HttpRequest()), "response")
my_view_cached2 = cache_page(123, key_prefix="test")(my_view)
self.assertEqual(my_view_cached2(HttpRequest()), "response")
def test_require_safe_accepts_only_safe_methods(self):
"""
Test for the require_safe decorator.
A view returns either a response or an exception.
Refs #15637.
"""
def my_view(request):
return HttpResponse("OK")
my_safe_view = require_safe(my_view)
request = HttpRequest()
request.method = 'GET'
self.assertIsInstance(my_safe_view(request), HttpResponse)
request.method = 'HEAD'
self.assertIsInstance(my_safe_view(request), HttpResponse)
request.method = 'POST'
self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
request.method = 'PUT'
self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
request.method = 'DELETE'
self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
# For testing method_decorator, a decorator that assumes a single argument.
# We will get type arguments if there is a mismatch in the number of arguments.
def simple_dec(func):
    """Decorator that prefixes the wrapped function's single string
    argument with ``"test:"`` while preserving its metadata."""
    @wraps(func)
    def prefixed(arg):
        return func("test:" + arg)
    return prefixed
simple_dec_m = method_decorator(simple_dec)
# For testing method_decorator, two decorators that add an attribute to the function
def myattr_dec(func):
    """Pass-through decorator that tags its wrapper with ``myattr = True``."""
    @wraps(func)
    def passthrough(*args, **kwargs):
        return func(*args, **kwargs)
    passthrough.myattr = True
    return passthrough
myattr_dec_m = method_decorator(myattr_dec)
def myattr2_dec(func):
    """Pass-through decorator that tags its wrapper with ``myattr2 = True``."""
    @wraps(func)
    def passthrough(*args, **kwargs):
        return func(*args, **kwargs)
    passthrough.myattr2 = True
    return passthrough
myattr2_dec_m = method_decorator(myattr2_dec)
class MethodDecoratorTests(TestCase):
"""
Tests for method_decorator
"""
def test_preserve_signature(self):
class Test(object):
@simple_dec_m
def say(self, arg):
return arg
self.assertEqual("test:hello", Test().say("hello"))
def test_preserve_attributes(self):
# Sanity check myattr_dec and myattr2_dec
@myattr_dec
@myattr2_dec
def func():
pass
self.assertEqual(getattr(func, 'myattr', False), True)
self.assertEqual(getattr(func, 'myattr2', False), True)
# Now check method_decorator
class Test(object):
@myattr_dec_m
@myattr2_dec_m
def method(self):
"A method"
pass
self.assertEqual(getattr(Test().method, 'myattr', False), True)
self.assertEqual(getattr(Test().method, 'myattr2', False), True)
self.assertEqual(getattr(Test.method, 'myattr', False), True)
self.assertEqual(getattr(Test.method, 'myattr2', False), True)
self.assertEqual(Test.method.__doc__, 'A method')
self.assertEqual(Test.method.__name__, 'method')
class XFrameOptionsDecoratorsTests(TestCase):
"""
Tests for the X-Frame-Options decorators.
"""
def test_deny_decorator(self):
"""
Ensures @xframe_options_deny properly sets the X-Frame-Options header.
"""
@xframe_options_deny
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_sameorigin_decorator(self):
"""
Ensures @xframe_options_sameorigin properly sets the X-Frame-Options
header.
"""
@xframe_options_sameorigin
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_exempt_decorator(self):
"""
Ensures @xframe_options_exempt properly instructs the
XFrameOptionsMiddleware to NOT set the header.
"""
@xframe_options_exempt
def a_view(request):
return HttpResponse()
req = HttpRequest()
resp = a_view(req)
self.assertEqual(resp.get('X-Frame-Options', None), None)
self.assertTrue(resp.xframe_options_exempt)
# Since the real purpose of the exempt decorator is to suppress
# the middleware's functionality, let's make sure it actually works...
r = XFrameOptionsMiddleware().process_response(req, resp)
self.assertEqual(r.get('X-Frame-Options', None), None)
| [
"540227148@qq.com"
] | 540227148@qq.com |
a13c30cb58afafc6423d27afc1fc0999a91c663c | 54fdc31183763afafe5c07de6cb4ed4167fbf2f2 | /projet_logiciel/main_fin_p3c1.py | 6bef301691d6719391cf4b517283db923e506f13 | [] | no_license | Patrick-Wampe/Debugging-avec-Python | e089130352c02ae78d04fe4ee4f8286d5eabce9f | fd9d36b45e063b25a694e0974f6051c26ae2ee94 | refs/heads/main | 2023-05-14T14:18:03.832620 | 2021-06-10T10:35:20 | 2021-06-10T10:35:20 | 374,808,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,179 | py | # -*- coding: utf8 -*-
from random import randint
import datetime
print("Bienvenue dans le jeu Pierre Feuille Ciseaux")
nom = "debogeur"
annee_actuelle = datetime.datetime.now().strftime("%Y")
annee_de_naissance = 2000
print(f"Bonjour {nom}, nous sommes en {annee_actuelle}, du coup tu as {int(annee_actuelle) - annee_de_naissance}.")
class pierre_feuille_ciseaux :
    """Rock-paper-scissors game state wired to the Tk widgets.

    Choice encoding: 1 = pierre (rock), 2 = feuille (paper),
    3 = ciseaux (scissors).  The ``pierre``/``feuille``/``ciseaux``/``zero``
    PhotoImage objects are resolved as module-level globals at call time.
    """

    # BUG FIX: the constructor was spelled ``_init_`` (single underscores),
    # so Python never ran it and ``pierre_feuille_ciseaux(a, b, c, d)``
    # raised TypeError.  Renamed to the real dunder ``__init__``.
    def __init__(self, nouveau_score_joueur, nouveau_score_ia, label_joueur, label_ia):
        # current scores
        self.score_joueur = 0
        self.score_intelligence_artificielle = 0
        # Tk Labels showing the scores and the last images played
        self.nouveau_score_joueur = nouveau_score_joueur
        self.nouveau_score_ia = nouveau_score_ia
        self.label_joueur = label_joueur
        self.label_ia = label_ia

    def maj_scores(self, choix_ia, choix_joueur):
        """Update both scores after one round; a draw changes nothing."""
        if choix_ia == 1 and choix_joueur == 2:
            self.score_joueur += 1
        elif choix_ia == 2 and choix_joueur == 1:
            self.score_intelligence_artificielle += 1
        elif choix_ia == 1 and choix_joueur == 3:
            self.score_intelligence_artificielle += 1
        elif choix_ia == 3 and choix_joueur == 1:
            self.score_joueur += 1
        elif choix_ia == 3 and choix_joueur == 2:
            self.score_intelligence_artificielle += 1
        elif choix_ia == 2 and choix_joueur == 3:
            self.score_joueur += 1

    def jouer(self, choix_joueur):
        """Play one round: draw the AI choice, show it, refresh the scores."""
        choix_ia = randint(1,3)
        if choix_ia==1:
            self.label_ia.configure(image=pierre)
        elif choix_ia==2:
            self.label_ia.configure(image=feuille)
        else:
            self.label_ia.configure(image=ciseaux)
        self.maj_scores(choix_ia,choix_joueur)
        self.nouveau_score_joueur.configure(text=str(self.score_joueur))
        self.nouveau_score_ia.configure(text=str(self.score_intelligence_artificielle))

    # BUG FIX: ``jouer_pierre`` was missing the ``self`` parameter (unlike
    # its two siblings), so the button callback raised TypeError.
    def jouer_pierre(self):
        self.jouer(1)
        self.label_joueur.configure(image=pierre)

    def jouer_feuille(self):
        self.jouer(2)
        self.label_joueur.configure(image=feuille)

    def jouer_ciseaux(self):
        self.jouer(3)
        self.label_joueur.configure(image=ciseaux)

    def rejouer(self):
        """Reset both scores and restore the placeholder image."""
        self.score_joueur = 0
        self.score_intelligence_artificielle = 0
        self.nouveau_score_joueur.configure(text=str(self.score_joueur))
        self.nouveau_score_ia.configure(text=str(self.score_intelligence_artificielle))
        self.label_joueur.configure(image=zero)
        self.label_ia.configure(image=zero)
from tkinter import PhotoImage, Tk
# NOTE(review): only PhotoImage and Tk are imported -- Label and Button are
# used below, so the try/except NameError blocks around them will fire.
# (This whole script is a debugging exercise with planted errors.)
try:
    versus = PhotoImage(file ='vs.gif')
    pierre = PhotoImage(file ='pierre.gif')
    feuille = PhotoImage(file ='feuille.gif')
    ciseaux = PhotoImage(file ='ciseaux.gif')
except RuntimeError:
    # PhotoImage raises RuntimeError when called before a Tk root exists.
    print("""Il y a un erreur au niveau de la fonction PhotoImage()
    D'après l'erreur tu as importé les images avant de créer la fenêtre.""")
fenetre = Tk()
fenetre.title("Pierre Feuille Ciseaux")
try :
    texte1 = Label(fenetre, text="Vous", font=("Arial", "20", "bold"))
    texte1.grid(row=0,column=0)
    texte2 = Label(fenetre, text="Intelligence artificielle", font=("Arial", 20, "bold"))
    texte2.grid(row=0,column=2)
    texte3 = Label(fenetre, text="Pour jouer, cliquez sur une des icônes ci-dessous.",font=("Arial", 20, "bold"))
    texte3.grid(row=3, columnspan =3, pady =5)
    nouveau_score_joueur = Label(fenetre, text="0", font=("Arial", 20, "bold"))
    nouveau_score_joueur.grid(row=1, column=0)
    nouveau_score_ia = Label(fenetre, text="0", font=("Arial", 20, "bold"))
    nouveau_score_ia.grid(row=1, column=2)
    # NOTE(review): ``zero`` is referenced before it is assigned (it is only
    # created in the next try block), so this raises NameError -- caught below.
    label_joueur = Label(fenetre, image=zero)
    label_joueur.grid(row =2, column =0)
    label_vs = Label(fenetre, image=versus)
    label_vs.grid(row =2, column =1)
    label_ia = Label(fenetre, image=zero)
    label_ia.grid(row =2, column =2)
except NameError:
    print("La fonction Label() n'a pas été importée.")
try:
    # NOTE(review): tkinter.PhotoImage cannot read JPEG files ('zero.jpg'),
    # hence the _tkinter.TclError reported below; a GIF/PNG is required.
    zero = PhotoImage(file ='zero.jpg')
except :
    print("Une exception _tkinter.TclError a été levée.")
try:
    jeu = pierre_feuille_ciseaux(nouveau_score_joueur, nouveau_score_ia, label_joueur, label_ia)
except NameError:
    print("Un des paramètres de la classe pose problème.")
try:
    # NOTE(review): Button(...).configure(...) returns None, so the chained
    # .grid() call fails and ``bouton_pierre`` ends up None.
    bouton_pierre = Button(fenetre,command=jeu.jouer_pierre).configure(image=pierre).grid(row =4, column =0)
    bouton_feuille = Button(fenetre,command=jeu.jouer_feuille)
    bouton_feuille.configure(image=feuille)
    bouton_feuille.grid(row =4, column =1,)
    bouton_ciseaux = Button(fenetre,command=jeu.jouer_ciseaux)
    bouton_ciseaux.configure(image=ciseaux)
    bouton_ciseaux.grid(row =4, column =2)
    # NOTE(review): assigned as ``bouton_recommence`` but used as
    # ``bouton_recommencer`` on the next line -> NameError.  ``E``/``W``
    # (sticky anchors) are also never imported from tkinter.
    bouton_recommence = Button(fenetre,text='Rejouer',command=jeu.rejouer,font=("Courier", 20, "bold"))
    bouton_recommencer.grid(row =5, column =0, pady =10, sticky=E)
    bouton_quitter = Button(fenetre,text='Quitter',command=quit,font=("Courier", 20, "bold"))
    bouton_quitter.grid(row =5, column =2, pady =10, sticky=W)
except NameError:
    print("La fonction Button() n'a été importée.")
fenetre.mainloop()
"noreply@github.com"
] | Patrick-Wampe.noreply@github.com |
d5aadd00bb09ce66112973792bc22e130386a94a | b21c237d6cbd8dcea90fa18154cc3b9e11508129 | /main.py | c6e6b8969d727e1b239b8a924268cb8d729c773f | [] | no_license | topoko123/fastapi-get-post-patch-put-del-basic | 4ad72de27b18e3deb3a5e107b826742057d30a13 | fe8165c714fc0e48bdd4c2b4dd5b5b74d8e1aab8 | refs/heads/main | 2023-05-08T16:32:00.387261 | 2021-05-30T11:26:34 | 2021-05-30T11:26:34 | 372,191,718 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,518 | py | from fastapi import FastAPI
from pydantic import BaseModel #Import BaseModel for receive
from typing import List, Optional
import uvicorn
class Item(BaseModel):
    """Item payload; every field optional so partial (PATCH) updates can omit any of them."""
    name : Optional[str] = None
    description: Optional[str] = None
    price : Optional[float] = None
class Address(BaseModel): #Collections of user address
house_no : str
village_no : str
alley : str
lane : str
road : str
sub_district : str
district : str
province : str
postal_code : str
class UserBase(BaseModel):
firstname : str
lastname : str
age : Optional[int] = None
email : Optional[str] = None
address : List[Address] #List[Address] <-- From class address(BaseModel)
class ListAll(BaseModel):
list_user : List[UserBase] #List[UserBase] <-- From class UserBase(BaseModel)
app = FastAPI()
items = { # Fake_db
'1': {
'name': 'mouse',
'description': 'This is a Mouse',
'price': 590,
},
'2': {
'name': 'keyboard',
'description': 'This is a Keyboard :)',
'price': 3490
}
}
@app.get('/items/{item_id}')
async def read_item(item_id: str):
    """Return the whole fake store for the special id '0', otherwise one item."""
    return items if item_id == '0' else items[item_id]
@app.get('/fullname/{firstname}/{lastname}')
async def read_fullname(firstname: str, lastname: str):
return firstname, lastname
@app.put('/items/{item_id}')
async def update_item(item_id: str, item: Item): #Item is from class Item(BaseModel)
items[item_id].update(**item.dict())
msg = 'update success'
return msg, item
@app.patch('/items/patch/{item_id}')
async def update_item_patch(item_id: str, item: Item):
    """Partially update one stored item.

    Only fields the client actually sent (``exclude_unset=True``) overwrite
    the stored values; omitted fields keep their previous content.
    """
    stored_item_data = items[item_id]
    stored_item_model = Item(**stored_item_data)
    update_data = item.dict(exclude_unset=True)
    # CONSISTENCY FIX: the merged model was bound to ``update_item``, which
    # shadows the PUT endpoint function of the same name; renamed locally.
    patched_item = stored_item_model.copy(update=update_data)
    items[item_id] = dict(patched_item)
    return items[item_id]
@app.delete('/items/delete/{item_id}')
async def delete_item(item_id: str):
items.pop(item_id)
return 'delete success'
#------------------------------------------------------------------#
@app.post('/user', response_model=ListAll)
async def user(request: ListAll): #ListAll is from class ListAll(BaseModel)
return request
if __name__ == '__main__':
uvicorn.run(app, host="0.0.0.0", port=80, debug=True)
| [
"m131.mdsoft@gmail.com"
] | m131.mdsoft@gmail.com |
4eba43e78eb66a2ee052c5643ef10dcfafa48917 | f299be6bd4ce2d34b3ddf625ecc44a9d99ce9bb7 | /etc/docker/dev/Other-certs/test-create-rules-pau.py | 2c74ee483cf7fed6431c058b6f05049b0c511100 | [
"Apache-2.0"
] | permissive | pic-es/rucio | 8a4688e26dd45062ff45ddc29dffa96ea2d7562f | f91ae9e79be62890d975a333b7976c89f7fed735 | refs/heads/master | 2022-12-30T13:34:01.663270 | 2020-09-04T13:31:24 | 2020-09-04T13:31:24 | 281,892,842 | 1 | 0 | Apache-2.0 | 2020-07-23T08:16:19 | 2020-07-23T08:16:18 | null | UTF-8 | Python | false | false | 31,015 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from __future__ import absolute_import, division, print_function
__author__ = "Agustin Bruzzese"
__copyright__ = "Copyright (C) 2020 Agustin Bruzzese"
__revision__ = "$Id$"
__version__ = "0.2"
import sys
sys.path.append("/usr/lib64/python3.6/site-packages/")
import gfal2
import io
import json
import linecache
import logging
import numpy as np
import os
import os.path
import random
import re
import time
import uuid
import zipfile
import string
import pathlib
import time
import pytz
from urllib.parse import urlunsplit
import graphyte, socket
from dateutil import parser
from datetime import (
datetime,
tzinfo,
timedelta,
timezone,
)
from gfal2 import (
Gfal2Context,
GError,
)
from io import StringIO
# Set Rucio virtual environment configuration
os.environ['RUCIO_HOME']=os.path.expanduser('~/Rucio-v2/rucio')
from rucio.rse import rsemanager as rsemgr
from rucio.client.client import Client
from rucio.client.didclient import DIDClient
from rucio.client.replicaclient import ReplicaClient
import rucio.rse.rsemanager as rsemgr
from rucio.client import RuleClient
from rucio.common.exception import (AccountNotFound, Duplicate, RucioException, DuplicateRule, InvalidObject, DataIdentifierAlreadyExists, FileAlreadyExists, RucioException,
AccessDenied, InsufficientAccountLimit, RuleNotFound, AccessDenied, InvalidRSEExpression,
InvalidReplicationRule, RucioException, DataIdentifierNotFound, InsufficientTargetRSEs,
ReplicationRuleCreationTemporaryFailed, InvalidRuleWeight, StagingAreaRuleRequiresLifetime)
from rucio.common.utils import adler32, detect_client_location, execute, generate_uuid, md5, send_trace, GLOBALLY_SUPPORTED_CHECKSUMS
gfal2.set_verbose(gfal2.verbose_level.debug)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Import Magic naming
from lfn2pfn import *
# In[2]:
class Rucio :
    def __init__(self, myscope, orgRse, destRse, account='bruzzese', working_folder=None):
        """Bind the Rucio clients and the scope/RSE configuration.

        Args:
            myscope: Rucio scope used for every DID handled by this object.
            orgRse: origin RSE name (where files are discovered).
            destRse: destination RSE name(s) that replicas are created on.
            account: Rucio account the clients act as.
            working_folder: optional sub-folder appended to the origin RSE
                base URL by ``get_rse_url``.
        """
        self.myscope = myscope
        self.orgRse = orgRse
        self.destRse = destRse
        self.working_folder = working_folder
        # gfal2 handle for direct storage operations (stat/checksum/mkdir)
        self.gfal = Gfal2Context()
        # Rucio API clients: DIDs, replicas and replication rules
        self.didc = DIDClient()
        self.repc = ReplicaClient()
        self.rulesClient = RuleClient()
        # Configuration
        self.account = account
        self.client = Client(account=self.account)
# Get list of all RSEs
def rses(self) :
rses_lists = list()
for single_rse in list(self.client.list_rses()) :
rses_lists.append(single_rse['rse'])
return(rses_lists)
def usage(self,s_rse) :
return(list(self.client.get_local_account_usage(account=self.account,rse=s_rse))[0])
def rules(self) :
return(list(self.client.list_account_rules(account=self.account)))
def myfunc(self):
print("Hello your setting are account=%s, scope=%s, origin RSE =%s and destination RSE =%s" %(self.account, self.myscope, self.orgRse, self.destRse))
def file_exists(self, pfn) :
try :
self.gfal.stat(pfn).st_size
return(True)
except :
return(False)
def get_rse_url(self):
"""
Return the base path of the rucio url
"""
rse_settings = rsemgr.get_rse_info(self.orgRse)
protocol = rse_settings['protocols'][0]
schema = protocol['scheme']
prefix = protocol['prefix']
port = protocol['port']
rucioserver = protocol['hostname']
rse_url = list()
if None not in (schema,str(rucioserver+':'+str(port)),prefix):
rse_url.extend([schema,rucioserver+':'+str(port),prefix,'',''])
if self.working_folder != None :
# Check if our test folder exists
path = os.path.join(urlunsplit(rse_url), self.working_folder)
self.gfal.mkdir_rec(path, 775)
return(path)
else :
return(urlunsplit(rse_url))
else :
return('Wrong url parameters')
def check_replica(self, lfn, dest_rse=None):
"""
Check if a replica of the given file at the site already exists.
"""
if lfn :
replicas = list(
self.client.list_replicas([{
'scope': self.myscope,
'name': lfn
}], rse_expression=dest_rse))
if replicas:
for replica in replicas:
if isinstance(replica,dict) :
if dest_rse in replica['rses']:
path = replica['rses'][dest_rse][0]
return(path)
return(False)
############################
## Create Metadata for DIDs
############################
def getFileMetaData(self, p_file, origenrse=None):
"""
Get the size and checksum for every file in the run from defined path
"""
'''
generate the registration of the file in a RSE :
:param rse: the RSE name.
:param scope: The scope of the file.
:param name: The name of the file.
:param bytes: The size in bytes.
:param adler32: adler32 checksum.
:param pfn: PFN of the file for non deterministic RSE
:param dsn: is the dataset name.
'''
name = os.path.basename(p_file)
name = name.replace('/','')
replica = {
'scope': self.myscope,
'name': name.replace('+','_'),
'adler32': self.gfal.checksum(p_file, 'adler32'),
'bytes': self.gfal.stat(p_file).st_size,
'pfn': p_file,
"meta": {"guid": str(generate_uuid())}
}
Data = dict();
Data['replica'] = replica
Data['scope'] = self.myscope
return(Data)
############################
## Create Groups of DIDs
############################
    def createDataset(self, new_dataset) :
        """Create dataset *new_dataset* in ``self.myscope``.

        Returns True when the dataset was newly created and False when it
        already existed.  On other RucioException errors the exception object
        is logged and the method implicitly returns None.

        NOTE(review): the ``Duplicate``/``AccountNotFound`` branches call
        ``generate_http_error_flask``, which is never imported in this module,
        so those paths would raise NameError -- confirm and fix.
        """
        logger.debug("| - - Checking if a provided dataset exists: %s for a scope %s" % (new_dataset, self.myscope))
        try:
            self.client.add_dataset(scope=self.myscope, name=new_dataset)
            return(True)
        except DataIdentifierAlreadyExists:
            return(False)
        except Duplicate as error:
            # NOTE(review): generate_http_error_flask is undefined here.
            return generate_http_error_flask(409, 'Duplicate', error.args[0])
        except AccountNotFound as error:
            return generate_http_error_flask(404, 'AccountNotFound', error.args[0])
        except RucioException as error:
            exc_type, exc_obj, tb = sys.exc_info()
            logger.debug(exc_obj)
def createcontainer(self, name_container):
'''
registration of the dataset into a container :
:param name_container: the container's name
:param info_dataset : contains,
the scope: The scope of the file.
the name: The dataset name.
'''
logger.debug("| - - - registering container %s" % name_container)
try:
self.client.add_container(scope=self.myscope, name=name_container)
except DataIdentifierAlreadyExists:
logger.debug("| - - - Container %s already exists" % name_container)
except Duplicate as error:
return generate_http_error_flask(409, 'Duplicate', error.args[0])
except AccountNotFound as error:
return generate_http_error_flask(404, 'AccountNotFound', error.args[0])
except RucioException as error:
exc_type, exc_obj, tb = sys.exc_info()
logger.debug(exc_obj)
############################
## General funciotn for registering a did into a GROUP of DID (CONTAINER/DATASET)
############################
def registerIntoGroup(self,n_file, new_dataset):
"""
Attaching a DID to a GROUP
"""
type_1 = self.client.get_did(scope=self.myscope, name=new_dataset)
type_2 = self.client.get_did(scope=self.myscope, name=n_file)
try:
self.client.attach_dids(scope=self.myscope, name=new_dataset, dids=[{'scope':self.myscope, 'name':n_file}])
except RucioException:
logger.debug("| - - - %s already attached to %s" %(type_2['type'],type_1['type']))
############################
## MAGIC functions
############################
def create_groups(self, organization) :
# 2.1) Create the dataset and containers for the file
self.createDataset(organization['dataset_1'])
# 2.1.1) Attach the dataset and containers for the file
self.registerIntoGroup(organization['replica'], organization['dataset_1'])
# 2.2) Create the dataset and containers for the file
self.createcontainer(organization['container_1'])
# 2.2.1) Attach the dataset and containers for the file
self.registerIntoGroup(organization['dataset_1'], organization['container_1'])
# 2.3) Create the dataset and containers for the file
self.createcontainer(organization['container_2'])
# 2.3.1) Attach the dataset and containers for the file
self.registerIntoGroup(organization['container_1'], organization['container_2'])
# 2.4) Create the dataset and containers for the file
self.createcontainer(organization['container_3'])
# 2.4.1) Attach the dataset and containers for the file
self.registerIntoGroup(organization['container_2'], organization['container_3'])
############################
## Create Rule for DIDs
############################
def addReplicaRule(self, destRSE, group):
"""
Create a replication rule for one dataset at a destination RSE
"""
type_1 = self.client.get_did(scope=self.myscope, name=group)
logger.debug("| - - - Creating replica rule for %s %s at rse: %s" % (type_1['type'], group, destRSE))
if destRSE:
try:
rule = self.rulesClient.add_replication_rule([{"scope":self.myscope,"name":group}],copies=1, rse_expression=destRSE, grouping='ALL', account=self.account, purge_replicas=True)
logger.debug("| - - - - Rule succesfully replicated at %s" % destRSE)
logger.debug("| - - - - - The %s has the following id %s" % (rule, destRSE))
return(rule[0])
except DuplicateRule:
exc_type, exc_obj, tb = sys.exc_info()
rules = list(self.client.list_account_rules(account=self.account))
if rules :
for rule in rules :
if rule['rse_expression'] == destRSE and rule['scope'] == self.myscope and rule['name'] == group:
logger.debug('| - - - - Rule already exists %s which contains the following DID %s:%s %s' % (rule['id'],self.myscope, group, str(exc_obj)))
except ReplicationRuleCreationTemporaryFailed:
exc_type, exc_obj, tb = sys.exc_info()
rules = list(self.client.list_account_rules(account=self.account))
if rules :
for rule in rules :
if rule['rse_expression'] == destRSE and rule['scope'] == self.myscope and rule['name'] == group:
print('| - - - - Rule already exists %s which contains the following DID %s:%s %s' % (rule['id'],self.myscope, group, str(exc_obj)))
############################
## Create Rules for not registered DIDs
############################
def outdated_register_replica(self, filemds, dest_RSE, org_RSE):
"""
Register file replica.
"""
carrier_dataset = 'outdated_replication_dataset' + '-' + str(uuid.uuid4())
creation = self.createDataset(carrier_dataset)
# Make sure your dataset is ephemeral
self.client.set_metadata(scope=self.myscope, name=carrier_dataset, key='lifetime', value=86400) # 86400 in seconds = 1 day
# Create a completly new create the RULE:
for filemd in filemds :
outdated = filemd['replica']['name']
self.registerIntoGroup(outdated, carrier_dataset)
# Add dummy dataset for replicating at Destination RSE
rule_child = self.addReplicaRule(dest_RSE, group=carrier_dataset)
# Add dummy dataset for replicating Origin RSE
rule_parent = self.addReplicaRule(org_RSE, group=carrier_dataset)
# Create a relation rule between origin and destiny RSE, so that the source data can be deleted
rule = self.client.update_replication_rule(rule_id=rule_parent, options={'lifetime': 10, 'child_rule_id':rule_child, 'purge_replicas':True})
logger.debug('| - - - - Creating relationship between parent %s and child %s : %s' % (rule_parent, rule_child, rule))
# Create a relation rule between the destinity rule RSE with itself, to delete the dummy rule, whiles keeping the destiny files
rule = self.client.update_replication_rule(rule_id=rule_child, options={'lifetime': 10, 'child_rule_id':rule_child})
logger.debug('| - - - - Creating relationship between parent %s and child %s : %s' % (rule_parent, rule_child, rule))
############################
## Create Dictionary for Grafana
############################
def stats_rules(self, rules) :
'''
Gather general information about
total number of rules, and stats.
'''
RUCIO = dict()
if rules :
for rule in rules :
if 'outdated_replication_dataset' not in rule['name'] :
if 'Rules' not in RUCIO :
RUCIO['Rules'] = {
'total_stuck' : 0,
'total_replicating' : 0,
'total_ok' : 0,
'total_rules': 0
}
RUCIO['Rules']['total_rules'] += 1
if rule['state'] == 'REPLICATING' :
RUCIO['Rules']['total_replicating'] += 1
elif rule['state'] == 'STUCK' :
RUCIO['Rules']['total_stuck'] += 1
elif rule['state'] == 'OK' :
RUCIO['Rules']['total_ok'] += 1
else :
RUCIO['Rules']['total_rules'] += 1
if rule['state'] == 'REPLICATING' :
RUCIO['Rules']['total_replicating'] += 1
elif rule['state'] == 'STUCK' :
RUCIO['Rules']['total_stuck'] += 1
elif rule['state'] == 'OK' :
RUCIO['Rules']['total_ok'] += 1
if 'AllRules' not in RUCIO :
RUCIO['AllRules'] = {
'total_stuck' : 0,
'total_replicating' : 0,
'total_ok' : 0,
'total_rules': 0
}
RUCIO['AllRules']['total_rules'] += 1
if rule['state'] == 'REPLICATING' :
RUCIO['AllRules']['total_replicating'] += 1
elif rule['state'] == 'STUCK' :
RUCIO['AllRules']['total_stuck'] += 1
elif rule['state'] == 'OK' :
RUCIO['AllRules']['total_ok'] += 1
else :
RUCIO['AllRules']['total_rules'] += 1
if rule['state'] == 'REPLICATING' :
RUCIO['AllRules']['total_replicating'] += 1
elif rule['state'] == 'STUCK' :
RUCIO['AllRules']['total_stuck'] += 1
elif rule['state'] == 'OK' :
RUCIO['AllRules']['total_ok'] += 1
##################
if 'Grouping' not in RUCIO :
RUCIO['Grouping'] = {
'file' : 0,
'dataset' : 0,
'container' : 0
}
if rule['did_type'] == 'CONTAINER' :
RUCIO['Grouping']['container'] += 1
elif rule['did_type'] == 'DATASET' :
RUCIO['Grouping']['dataset'] += 1
elif rule['did_type'] == 'FILE' :
RUCIO['Grouping']['file'] += 1
else :
if rule['did_type'] == 'CONTAINER' :
RUCIO['Grouping']['container'] += 1
elif rule['did_type'] == 'DATASET' :
RUCIO['Grouping']['dataset'] += 1
elif rule['did_type'] == 'FILE' :
RUCIO['Grouping']['file'] += 1
return(RUCIO)
def stats_replica_rules(self, rules) :
'''
Gather specific information about
state and number of replicas.
'''
REPLICAS = dict()
REPLICAS['RSE'] = {}
if rules :
# Creates a key for all the RSEs that we have replicas
for rule in rules :
# if the RSE is not in the dictionary
#print(rule['rse_expression'], REPLICAS['RSE'])
if rule['rse_expression'] not in REPLICAS['RSE'] :
#print(REPLICAS)
REPLICAS['RSE'][rule['rse_expression']] = {
'total_replica_stuck' : rule['locks_stuck_cnt'],
'total_replica_replicating' : rule['locks_replicating_cnt'],
'total_replica_ok' : rule['locks_ok_cnt']
}
# else if it is, update replica numbers
else :
REPLICAS['RSE'][rule['rse_expression']]['total_replica_stuck'] += rule['locks_stuck_cnt']
REPLICAS['RSE'][rule['rse_expression']]['total_replica_replicating'] += rule['locks_replicating_cnt']
REPLICAS['RSE'][rule['rse_expression']]['total_replica_ok'] += rule['locks_ok_cnt']
return(REPLICAS)
def stats_usage_rules(self, all_rses) :
STORAGE = dict()
STORAGE['USAGE'] = {}
for x_rse in all_rses :
rses = self.usage(x_rse)
if rses['bytes'] != 0 :
if rses['rse'] not in STORAGE['USAGE'] :
STORAGE['USAGE'][rses['rse']] = {
'total_bytes_used' : rses['bytes']
}
# else if it is, update replica numbers
else :
STORAGE['USAGE'][rses['rse']]['total_bytes_used'] += rses['bytes']
return(STORAGE)
# In[3]:
class Look_for_Files :
    """Recursive file discovery over a gfal2-accessible storage URL."""

    def __init__(self):
        # gfal2 handle used for every remote listing operation
        self.gfal = Gfal2Context()

    def check_directory(self, path):
        """Return True if *path* can be listed as a directory, else False."""
        try:
            self.gfal.listdir(str(path))
            return True
        except Exception:
            # gfal raises for plain files / unreachable paths: not a dir.
            return False

    def scrap_through_files(self, path):
        """Return the plain files directly inside *path* (non-recursive).

        BUG FIX: the original read ``self.path`` -- an attribute that is
        never set anywhere in this class -- instead of the ``path``
        parameter, so every call raised AttributeError.
        """
        all_files = []
        for entry in self.gfal.listdir(str(path)):
            full_path = os.path.join(path, entry)
            if not self.check_directory(full_path):
                all_files.append(full_path)
        return all_files

    def scrap_through_dir(self, path):
        """Recursively collect the full path of every file under *path*."""
        log = logging.getLogger(__name__)
        log.debug("*-Listin files from url : %s" % path)
        all_files = []
        for entry in self.gfal.listdir(str(path)):
            fullPath = os.path.join(path, entry)
            if self.check_directory(fullPath):
                log.debug('|--- ' + fullPath + ' its a directory ')
                all_files = all_files + self.scrap_through_dir(fullPath)
            else:
                log.debug('|--- '+ fullPath + ' its a file')
                all_files.append(fullPath)
        return all_files
# In[4]:
############################
# Check existence of json File
############################
def json_write(data, filename='Rucio-bkp.json'):
    """Serialise *data* to *filename* as pretty-printed JSON (non-ASCII kept)."""
    serialised = json.dumps(data, ensure_ascii=False, indent=4)
    with io.open(filename, 'w') as handle:
        handle.write(serialised)
def json_check(json_file_name='Rucio-bkp.json'):
    """Check that the JSON state file exists and is usable.

    Returns True when the file exists, is non-empty and is readable.
    A zero-byte file is treated as corrupt: it is deleted and False is
    returned so the caller regenerates it.

    BUG FIX: the original chain had no final branch, so an existing but
    unreadable file silently fell through and returned None; every path
    now returns an explicit bool.
    """
    log = logging.getLogger(__name__)
    if not os.path.isfile(json_file_name):
        log.debug("Either file is missing or is not readable, creating file...")
        return False
    if os.stat(json_file_name).st_size == 0:
        # A zero-byte state file is useless: drop it and report "missing".
        os.remove(json_file_name)
        return False
    if os.access(json_file_name, os.R_OK):
        log.debug("File exists in JSON and is readable")
        return True
    return False
# In[5]:
def register_rucio() :
# Look for files in the orgRse
l1 = Look_for_Files()
listOfFiles = l1.scrap_through_dir(r1.get_rse_url())
if listOfFiles :
# Create a dictionary with the properties for writing a json
result_dict = dict();
for dest in r1.destRse :
# Create an array for those files that has not been replicated
n_unreplicated = []
for n in range(0,len(listOfFiles)):
# for n in range(0,20):
name = str(listOfFiles[n])
logger.debug('| - ' + str(n) + ' - ' + str(len(listOfFiles)) + ' name : ' + name)
# Break down the file path
f_name = base=os.path.basename(name)
# Check if file is already is registered at a particular destination RSE
check = r1.check_replica(lfn=f_name.replace('+','_'), dest_rse=dest)
# If it is registered, skip add replica
if check != False : ## needs to be changed to False
logger.debug('| - - The FILE %s already have a replica at RSE %s : %s' % (f_name, dest, check))
# Else, if the files has no replica at destination RSE
else :
# 1) Get the file metadata
metaData = r1.getFileMetaData(name, r1.orgRse)
r1.client.add_replicas(rse=r1.orgRse, files=[metaData['replica']])
# 2) Look for create and attach groups
# look at script lfn2pfn.py
group = groups(name)
# functions : groups and create_groups
r1.create_groups(group)
# 3) Add information to Json file :
temp_dict = dict()
temp_dict[f_name] = {}
temp_dict[f_name]['Properties'] = {**metaData['replica'], **{'updated': datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ')}}
temp_dict[f_name]['Organization'] = group
temp_dict[f_name]['Replicated'] = {dest : {**{'state': 'REPLICATING'}, **{'registered': datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ')}}}
# 4) Contruct a dictionary
if f_name in result_dict :
result_dict[f_name]['Replicated'].update(temp_dict[f_name]['Replicated'])
# if its is the first entry, add the RSE where it was found :
elif f_name not in result_dict :
origin = { r1.orgRse : {
'path': name,
'registered': datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ'),
'state': 'ALIVE',
}}
temp_dict[f_name]['Replicated'].update(origin)
result_dict[f_name] = temp_dict[f_name]
# 5) Create the Main Replication Rule at Destination RSE
main_rule = r1.addReplicaRule(dest, group['container_3'])
logger.debug("| - - - - Getting parameters for rse %s" % dest)
# 6 ) Create the json array
# Finally, add them to a general list
n_unreplicated.append(metaData)
logger.debug('Your are going to replicate %s files' % str(len(n_unreplicated)))
print('Your are going to replicate %s files' % str(len(n_unreplicated)))
## Now, create Dummy rules between the ORIGIN and DESTINATION RSEs
if len(n_unreplicated) > 0 :
r1.outdated_register_replica(n_unreplicated, dest, r1.orgRse)
# Finally return the information of the replicas as a dictionary
return(result_dict)
# In[6]:
def stateCheck(json_file='Rucio-bkp.json'):
with open(json_file) as f :
data_keys = json.load(f)
for file in data_keys :
for ele in data_keys[file].values():
if isinstance(ele,dict):
for key, value in ele.items():
if key in r1.rses() :
if 'path' in value:
if value['state'] == 'ALIVE' :
# Check for deleted files
try :
existence = r1.file_exists(value['path'])
# If gfal fails, it means that the file still exists
except :
print('failed')
dead_state = dict()
dead_state = {'state': 'DEAD',
'deleted': datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
}
data_keys[file]['Replicated'][key].update(dead_state)
elif 'state' in value :
# Check completed transference files
if value['state'] == 'REPLICATING' :
#check = check_replica(DEFAULT_SCOPE, file.strip('+').replace('+','_'), dest_rse=info[0])
check = r1.check_replica(lfn=file.replace('+','_'), dest_rse=key)
#if there's no replica at destiny RSE
if check != False :
replication_state = dict()
replication_state = {'path': check,
'copied': datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ'),
'state': 'ALIVE'}
# Update the dictionary with the file properties
data_keys[file]['Replicated'][key].update(replication_state)
return(data_keys)
# In[7]:
class Grafana :
def __init__(self) :
self.gr_prefix = [line for line in open('/etc/collectd.d/write_graphite-config.conf', 'r').readlines() if "Prefix" in line][0].strip().split()[1].strip('"')
## Prepare data for plots replicas
def prepare_grafana(self, dictionary, string='RUCIO.') :
metric_list = []
for key in dictionary.keys() :
if isinstance(dictionary[key],int):
metric_list.append((str(string+key),dictionary[key]) )
elif isinstance(dictionary[key],dict):
metric_list.extend(self.prepare_grafana(dictionary[key], str(string+key+'.')))
return(metric_list)
def send_to_graf(self, dictionary, myport=2013, myprotocol='udp') :
for key in self.prepare_grafana(dictionary):
if (key[0], key[1]) is not None :
#print(key[0].lower(),key[1])
graphyte.Sender('graphite01.pic.es', port=myport, protocol=myprotocol, prefix=self.gr_prefix + socket.gethostname().replace(".","_")).send(key[0].lower(), key[1])
graphyte.Sender('graphite02.pic.es', port=myport, protocol=myprotocol, prefix=self.gr_prefix + socket.gethostname().replace(".","_")).send(key[0].lower(), key[1])
# In[8]:
if __name__ == '__main__':
# Initialize Rucio class and functions
r1 = Rucio(myscope='test-pau', orgRse='XRD2-NON-DET',
destRse=['XRD1-DET'],
account='pau', working_folder='Server-test')
r1.myfunc()
# It creates the main rule for replication at Destinatio RSE (see rses_catch)
replication_dict = register_rucio()
if json_check() == True :
check_dict = stateCheck()
# if both results resulted ok
if isinstance(replication_dict,dict) & isinstance(check_dict,dict):
replication_dict.update(check_dict)
elif not check_dict :
replication_dict = replication_dict
elif not replication_dict:
replication_dict = check_dict
# creates a resulting dictionary with the files found with their respective
# RSEs where they have been replicated
json_write(replication_dict)
'''# Load grafana module
g1 = Grafana()
# 1) Plot general state of rules
g1.send_to_graf(r1.stats_rules(r1.rules()))
# 2) Plot state of replicas per RSE
g1.send_to_graf(r1.stats_replica_rules(r1.rules()))
# 3) Plot RSE usage
g1.send_to_graf(r1.stats_usage_rules(r1.rses()))'''
| [
"bruzzese.agustin@gmail.com"
] | bruzzese.agustin@gmail.com |
239859618e261ad2335982654069188d008796f4 | 34a4a61dfc837bd0e55f985869c32baef2b9b867 | /Shelter.back/domain/enums.py | 27561a99064728b3fa4fa6991b66a3793c826c2f | [] | no_license | BlueInt32/shelter | 5934984b6a256659ec4a20f80926488c5687083c | fc8c238a501c608ae38e2dd01724c7b91ff77a1a | refs/heads/master | 2023-01-13T00:31:24.370302 | 2020-05-26T05:22:12 | 2020-05-26T05:22:12 | 225,805,189 | 0 | 0 | null | 2023-01-05T03:45:58 | 2019-12-04T07:24:39 | Vue | UTF-8 | Python | false | false | 83 | py | from enum import Enum
class PersistanceType(Enum):
CREATE = 1,
UPDATE = 2
| [
"simon.budin@gmail.com"
] | simon.budin@gmail.com |
9ca2e633e67d0d45a394bc37f48e58a9aa50e852 | 72fc93362a930d2ef8c951c6c53f9862abf60b03 | /captain_console/user/migrations/0002_auto_20200509_1717.py | 6e4cd570b49e6b936f684c1885309a9879b319d7 | [] | no_license | valdisbaerings/VN_2 | 434a08ad1cac0ba2fb45dc0d5707cc62cc117771 | 52ab14980e6e2c39d9007f3e9cefe276d5aebbb6 | refs/heads/master | 2022-06-20T04:51:58.070386 | 2020-05-14T16:40:50 | 2020-05-14T16:40:50 | 262,390,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # Generated by Django 3.0.6 on 2020-05-09 17:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='profile_image',
field=models.CharField(default='https://img.icons8.com/pastel-glyph/512/000000/user-male--v1.png', max_length=9999),
),
]
| [
"saralindsveins@gmail.com"
] | saralindsveins@gmail.com |
12148e25092c6f6329984c046651dba85edfb209 | 3c2e75d3563053dd186dcff324fd84eba561f2a7 | /python/onos/rsm/__init__.py | b94966701cbd5b870c5f829c3de02c8bd040a16c | [
"Apache-2.0"
] | permissive | stjordanis/onos-api | 00c2434090b9f51d7eacf00f082abd7f2146c1fc | 13fca9dc160a23bc9d89e4ef33ee2da9b2a8ee48 | refs/heads/master | 2023-09-02T11:07:58.824154 | 2021-11-01T17:40:27 | 2021-11-01T17:40:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 10,002 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: onos/rsm/rsm.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import Dict, List, Optional
import betterproto
from betterproto.grpc.grpclib_server import ServiceBase
import grpclib
class SliceType(betterproto.Enum):
SLICE_TYPE_DL_SLICE = 0
SLICE_TYPE_UL_SLICE = 1
class SchedulerType(betterproto.Enum):
SCHEDULER_TYPE_ROUND_ROBIN = 0
SCHEDULER_TYPE_PROPORTIONALLY_FAIR = 1
SCHEDULER_TYPE_QOS_BASED = 2
class UeIdType(betterproto.Enum):
UE_ID_TYPE_CU_UE_F1_AP_ID = 0
UE_ID_TYPE_DU_UE_F1_AP_ID = 1
UE_ID_TYPE_RAN_UE_NGAP_ID = 2
UE_ID_TYPE_AMF_UE_NGAP_ID = 3
UE_ID_TYPE_ENB_UE_S1_AP_ID = 4
@dataclass(eq=False, repr=False)
class SliceItem(betterproto.Message):
e2_node_id: str = betterproto.string_field(1)
slice_ids: List[str] = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class Ack(betterproto.Message):
success: bool = betterproto.bool_field(1)
cause: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class CreateSliceRequest(betterproto.Message):
e2_node_id: str = betterproto.string_field(1)
slice_id: str = betterproto.string_field(2)
scheduler_type: "SchedulerType" = betterproto.enum_field(3)
weight: str = betterproto.string_field(4)
slice_type: "SliceType" = betterproto.enum_field(5)
@dataclass(eq=False, repr=False)
class CreateSliceResponse(betterproto.Message):
ack: "Ack" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class UpdateSliceRequest(betterproto.Message):
e2_node_id: str = betterproto.string_field(1)
slice_id: str = betterproto.string_field(2)
scheduler_type: "SchedulerType" = betterproto.enum_field(3)
weight: str = betterproto.string_field(4)
slice_type: "SliceType" = betterproto.enum_field(5)
@dataclass(eq=False, repr=False)
class UpdateSliceResponse(betterproto.Message):
ack: "Ack" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class DeleteSliceRequest(betterproto.Message):
e2_node_id: str = betterproto.string_field(1)
slice_id: str = betterproto.string_field(2)
slice_type: "SliceType" = betterproto.enum_field(3)
@dataclass(eq=False, repr=False)
class DeleteSliceResponse(betterproto.Message):
ack: "Ack" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class SliceAssocItem(betterproto.Message):
ue_slice_assoc_id: str = betterproto.string_field(1)
e2_node_id: str = betterproto.string_field(2)
ue_id: List["UeIdType"] = betterproto.enum_field(3)
slice_id: str = betterproto.string_field(4)
@dataclass(eq=False, repr=False)
class UeId(betterproto.Message):
ue_id: str = betterproto.string_field(1)
type: "UeIdType" = betterproto.enum_field(2)
@dataclass(eq=False, repr=False)
class SetUeSliceAssociationRequest(betterproto.Message):
e2_node_id: str = betterproto.string_field(1)
ue_id: List["UeId"] = betterproto.message_field(2)
dl_slice_id: str = betterproto.string_field(3)
ul_slice_id: str = betterproto.string_field(4)
drb_id: str = betterproto.string_field(5)
@dataclass(eq=False, repr=False)
class SetUeSliceAssociationResponse(betterproto.Message):
ack: "Ack" = betterproto.message_field(1)
assigned_ue_slice_assoc_id: str = betterproto.string_field(2)
class RsmStub(betterproto.ServiceStub):
async def create_slice(
self,
*,
e2_node_id: str = "",
slice_id: str = "",
scheduler_type: "SchedulerType" = None,
weight: str = "",
slice_type: "SliceType" = None,
) -> "CreateSliceResponse":
request = CreateSliceRequest()
request.e2_node_id = e2_node_id
request.slice_id = slice_id
request.scheduler_type = scheduler_type
request.weight = weight
request.slice_type = slice_type
return await self._unary_unary(
"/onos.rsm.Rsm/CreateSlice", request, CreateSliceResponse
)
async def update_slice(
self,
*,
e2_node_id: str = "",
slice_id: str = "",
scheduler_type: "SchedulerType" = None,
weight: str = "",
slice_type: "SliceType" = None,
) -> "UpdateSliceResponse":
request = UpdateSliceRequest()
request.e2_node_id = e2_node_id
request.slice_id = slice_id
request.scheduler_type = scheduler_type
request.weight = weight
request.slice_type = slice_type
return await self._unary_unary(
"/onos.rsm.Rsm/UpdateSlice", request, UpdateSliceResponse
)
async def delete_slice(
self,
*,
e2_node_id: str = "",
slice_id: str = "",
slice_type: "SliceType" = None,
) -> "DeleteSliceResponse":
request = DeleteSliceRequest()
request.e2_node_id = e2_node_id
request.slice_id = slice_id
request.slice_type = slice_type
return await self._unary_unary(
"/onos.rsm.Rsm/DeleteSlice", request, DeleteSliceResponse
)
async def set_ue_slice_association(
self,
*,
e2_node_id: str = "",
ue_id: Optional[List["UeId"]] = None,
dl_slice_id: str = "",
ul_slice_id: str = "",
drb_id: str = "",
) -> "SetUeSliceAssociationResponse":
ue_id = ue_id or []
request = SetUeSliceAssociationRequest()
request.e2_node_id = e2_node_id
if ue_id is not None:
request.ue_id = ue_id
request.dl_slice_id = dl_slice_id
request.ul_slice_id = ul_slice_id
request.drb_id = drb_id
return await self._unary_unary(
"/onos.rsm.Rsm/SetUeSliceAssociation",
request,
SetUeSliceAssociationResponse,
)
class RsmBase(ServiceBase):
async def create_slice(
self,
e2_node_id: str,
slice_id: str,
scheduler_type: "SchedulerType",
weight: str,
slice_type: "SliceType",
) -> "CreateSliceResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def update_slice(
self,
e2_node_id: str,
slice_id: str,
scheduler_type: "SchedulerType",
weight: str,
slice_type: "SliceType",
) -> "UpdateSliceResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def delete_slice(
self, e2_node_id: str, slice_id: str, slice_type: "SliceType"
) -> "DeleteSliceResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def set_ue_slice_association(
self,
e2_node_id: str,
ue_id: Optional[List["UeId"]],
dl_slice_id: str,
ul_slice_id: str,
drb_id: str,
) -> "SetUeSliceAssociationResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def __rpc_create_slice(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"e2_node_id": request.e2_node_id,
"slice_id": request.slice_id,
"scheduler_type": request.scheduler_type,
"weight": request.weight,
"slice_type": request.slice_type,
}
response = await self.create_slice(**request_kwargs)
await stream.send_message(response)
async def __rpc_update_slice(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"e2_node_id": request.e2_node_id,
"slice_id": request.slice_id,
"scheduler_type": request.scheduler_type,
"weight": request.weight,
"slice_type": request.slice_type,
}
response = await self.update_slice(**request_kwargs)
await stream.send_message(response)
async def __rpc_delete_slice(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"e2_node_id": request.e2_node_id,
"slice_id": request.slice_id,
"slice_type": request.slice_type,
}
response = await self.delete_slice(**request_kwargs)
await stream.send_message(response)
async def __rpc_set_ue_slice_association(
self, stream: grpclib.server.Stream
) -> None:
request = await stream.recv_message()
request_kwargs = {
"e2_node_id": request.e2_node_id,
"ue_id": request.ue_id,
"dl_slice_id": request.dl_slice_id,
"ul_slice_id": request.ul_slice_id,
"drb_id": request.drb_id,
}
response = await self.set_ue_slice_association(**request_kwargs)
await stream.send_message(response)
def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
return {
"/onos.rsm.Rsm/CreateSlice": grpclib.const.Handler(
self.__rpc_create_slice,
grpclib.const.Cardinality.UNARY_UNARY,
CreateSliceRequest,
CreateSliceResponse,
),
"/onos.rsm.Rsm/UpdateSlice": grpclib.const.Handler(
self.__rpc_update_slice,
grpclib.const.Cardinality.UNARY_UNARY,
UpdateSliceRequest,
UpdateSliceResponse,
),
"/onos.rsm.Rsm/DeleteSlice": grpclib.const.Handler(
self.__rpc_delete_slice,
grpclib.const.Cardinality.UNARY_UNARY,
DeleteSliceRequest,
DeleteSliceResponse,
),
"/onos.rsm.Rsm/SetUeSliceAssociation": grpclib.const.Handler(
self.__rpc_set_ue_slice_association,
grpclib.const.Cardinality.UNARY_UNARY,
SetUeSliceAssociationRequest,
SetUeSliceAssociationResponse,
),
}
| [
"noreply@github.com"
] | stjordanis.noreply@github.com |
ae0aef6a83feebd6e73b83650c28cc5c4c536818 | ce3cbf8ed094808408634f6e9257bb575f65e654 | /principal/forms.py | 9f7048e6ed1cb70adc34136e3bc8e162d7d7a137 | [] | no_license | iosamuel/meetup | 165e6e4bf83c843507c45cb1ad78709d787c711a | af7a746dee0344ca41eae3b64571a7fa3ca6a39c | refs/heads/master | 2016-09-06T08:52:28.898449 | 2013-06-01T15:56:29 | 2013-06-01T15:56:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | from django import forms
from .models import *
class EventosForm(forms.ModelForm):
class Meta:
model = Eventos
# widgets = {
# 'nombre': forms.InputText(attrs={'class':'inputB inputA'})
# }
# <input class="inputA inputB">
class UsuariosForm(forms.ModelForm):
class Meta:
model = Usuarios
exclude = ('eventos',) | [
"samuelb1311@gmail.com"
] | samuelb1311@gmail.com |
2fc6d94c3b2b11ba86ebe114e83a451f636f25c9 | 18df22a18f34b4e3066e797f024fb2e475408847 | /praca_z_danymi.py | 527424c0273538f106cccc209a5f923f8c55814d | [] | no_license | Karolinak246/Programing-in-Python | 8841d10495d661ad3708a1ba07cfe92c1c93a87d | 47093309b5cee0424552cb5fee0272cf78758e8c | refs/heads/main | 2023-01-31T20:09:50.361568 | 2020-12-15T23:02:25 | 2020-12-15T23:02:25 | 321,491,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | ################################
# 7.12.2020
# Kopczak Karolina
# Programowanie w jezyku Python
# Zadania: PRACA Z DANYMI
################################
#!/usr/bin/env python3
path = "/home/pi/Documents/Python"
from xml.dom import minidom
import xml.dom.minidom
import os
import numpy as np
import pandas as pd
import csv
### TASK 1 ###
def xml():
file = minidom.parse("XML/somexml.xml")
tag = file.getElementsByTagName("CATALOG")[0]
print("Tag name before changing: ", file.firstChild.tagName)
tag.tagName = "change"
file.writexml(open('XML/some_changed_xml.xml', 'w'))
file2 = minidom.parse("XML/some_changed_xml.xml")
print("Tag name after changing: ", file2.firstChild.tagName)
xml()
### TASK 2 ###
def csvjson():
if os.path.isfile("CSV/file.csv")==True:
try:
data = pd.read_csv("CSV/file.csv")
print(data)
deleterecord = input ("Would you like to delete last record? Y/N ")
if deleterecord == "y" or deleterecord == "Y":
data.drop(data.tail(1).index, inplace = True)
print (data)
data.to_csv("CSV/file.csv", sep ="\t")
except:
print("The file is empty, data will be added.")
data = pd.DataFrame()
else:
print("There is no file at such name. file.csv will be created.")
newrecord = input("Would you like to add new record? Y/N ")
if newrecord == "y" or newrecord == "Y":
recA = input("Enter the title of the movie: ")
recB = input("Enter the code of CD: ")
recC = input("Enter Client's name: ")
recD = input("Enter Clinet's surname: ")
recE = input("Enter phone number: ")
recF = input("Enter amount of days (how long will the film be on loan?): ")
recG = input("Enter the price: ")
df = pd.DataFrame({ "Title":[recA],
"Code":[recB],
"Name":[recC],
"Surname":[recD],
"Phone_number":[recE],
"Days":[recF],
"Price":[recG]})
if data.empty == True:
df.to_csv("CSV/file.csv", sep = "\t", header = True)
else:
df.to_csv("CSV/file.csv", sep = "\t", mode = "a", header = False)
csvjson()
| [
"noreply@github.com"
] | Karolinak246.noreply@github.com |
a11962ae95b28d1923e23d0a5c514d53c454524e | 7889f7f0532db6a7f81e6f8630e399c90438b2b9 | /3.7.1/_downloads/a54f19823bde998a456571636498aa98/auto_subplots_adjust.py | bd6326b8291f4b1a16db182e1f642d2279a8f0b0 | [] | no_license | matplotlib/matplotlib.github.com | ef5d23a5bf77cb5af675f1a8273d641e410b2560 | 2a60d39490941a524e5385670d488c86083a032c | refs/heads/main | 2023-08-16T18:46:58.934777 | 2023-08-10T05:07:57 | 2023-08-10T05:08:30 | 1,385,150 | 25 | 59 | null | 2023-08-30T15:59:50 | 2011-02-19T03:27:35 | null | UTF-8 | Python | false | false | 3,366 | py | """
===============================================
Programmatically controlling subplot adjustment
===============================================
.. note::
This example is primarily intended to show some advanced concepts in
Matplotlib.
If you are only looking for having enough space for your labels, it is
almost always simpler and good enough to either set the subplot parameters
manually using `.Figure.subplots_adjust`, or use one of the automatic
layout mechanisms
(:doc:`/tutorials/intermediate/constrainedlayout_guide` or
:doc:`/tutorials/intermediate/tight_layout_guide`).
This example describes a user-defined way to read out Artist sizes and
set the subplot parameters accordingly. Its main purpose is to illustrate
some advanced concepts like reading out text positions, working with
bounding boxes and transforms and using
:ref:`events <event-handling-tutorial>`. But it can also serve as a starting
point if you want to automate the layouting and need more flexibility than
tight layout and constrained layout.
Below, we collect the bounding boxes of all y-labels and move the left border
of the subplot to the right so that it leaves enough room for the union of all
the bounding boxes.
There's one catch with calculating text bounding boxes:
Querying the text bounding boxes (`.Text.get_window_extent`) needs a
renderer (`.RendererBase` instance), to calculate the text size. This renderer
is only available after the figure has been drawn (`.Figure.draw`).
A solution to this is putting the adjustment logic in a draw callback.
This function is executed after the figure has been drawn. It can now check
if the subplot leaves enough room for the text. If not, the subplot parameters
are updated and second draw is triggered.
.. redirect-from:: /gallery/pyplots/auto_subplots_adjust
"""
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
fig, ax = plt.subplots()
ax.plot(range(10))
ax.set_yticks([2, 5, 7], labels=['really, really, really', 'long', 'labels'])
def on_draw(event):
bboxes = []
for label in ax.get_yticklabels():
# Bounding box in pixels
bbox_px = label.get_window_extent()
# Transform to relative figure coordinates. This is the inverse of
# transFigure.
bbox_fig = bbox_px.transformed(fig.transFigure.inverted())
bboxes.append(bbox_fig)
# the bbox that bounds all the bboxes, again in relative figure coords
bbox = mtransforms.Bbox.union(bboxes)
if fig.subplotpars.left < bbox.width:
# Move the subplot left edge more to the right
fig.subplots_adjust(left=1.1*bbox.width) # pad a little
fig.canvas.draw()
fig.canvas.mpl_connect('draw_event', on_draw)
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.artist.Artist.get_window_extent`
# - `matplotlib.transforms.Bbox`
# - `matplotlib.transforms.BboxBase.transformed`
# - `matplotlib.transforms.BboxBase.union`
# - `matplotlib.transforms.Transform.inverted`
# - `matplotlib.figure.Figure.subplots_adjust`
# - `matplotlib.figure.SubplotParams`
# - `matplotlib.backend_bases.FigureCanvasBase.mpl_connect`
| [
"quantum.analyst@gmail.com"
] | quantum.analyst@gmail.com |
ba15778cb67c98978cb7fc27b5541dcb2ceeba8a | 3e0e9508b3b8d5609392fd64c57491d278c73c89 | /venv/bin/gunicorn | c5ff15177c6bba9a6f255f72a472181d98d2c757 | [] | no_license | raghav96/DatabaseWebApp | 9d59c9866e6977aac0ead06d242e01c5724211d0 | a81ba383393c61877ad40acc44cda30ef2d7b19e | refs/heads/master | 2021-01-02T08:36:38.272371 | 2017-08-01T18:21:47 | 2017-08-01T18:21:47 | 99,030,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | #!/Users/raghav/flask-app/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"raghav96@github.com"
] | raghav96@github.com | |
b28bbc203b60e128307f6f9d8d309793f3dc1e1a | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /yXZhG7zq6dWhWhirt_24.py | 1b4a59876a63f5dfdb4b9e7de1d41c308c735314 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py |
def is_prime(n):
if (n==1):
return False
for i in range(2,round(n**(0.5))+1):
if i!=n and (n%i)==0:
return False
return True
def filter_primes(num):
return [n for n in num if is_prime(n)]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
bbda84923f2c455dc60051aa1e126bf4dd187233 | 4a88ec266b64521fcaef88d92cb2b57776d3192b | /powerUsageNotification/powerUsageNotification.py | 132c11551e75d59adb52856ce265d156f20d6af7 | [
"MIT"
] | permissive | johntdyer/appdaemon-scripts | 4e5ea345d27d54d8133be212e5f7af57b8dfd57f | ce7e32a919be5a835d0bdf95e6650ff34b699220 | refs/heads/master | 2020-03-31T18:01:49.517418 | 2018-10-07T14:06:38 | 2018-10-07T14:06:38 | 152,443,705 | 1 | 0 | null | 2018-10-10T15:10:00 | 2018-10-10T15:09:59 | null | UTF-8 | Python | false | false | 4,100 | py | import appdaemon.plugins.hass.hassapi as hass
import globals
#
# App which notifies you when a power usage sensor indicated a device is on/off
#
#
# Args:
#
# app_switch: on/off switch for this app. example: input_boolean.turn_fan_on_when_hot
# sensor: power sensor. example: sensor.dishwasher_power_usage
# notify_name: Who to notify. example: group_notifications
# delay: seconds to wait until a the device is considered "off". example: 60
# threshold: amount of "usage" which indicated the device is on. example: 2
# alternative_name: Name to use in notification. example: Waschmaschine
#
# Release Notes
#
# Version 1.3:
# use Notify App
#
# Version 1.2:
# message now directly in own yaml instead of message module
#
# Version 1.1:
# Added app_switch
#
# Version 1.0:
# Initial Version
class PowerUsageNotification(hass.Hass):
def initialize(self):
self.timer_handle_list = []
self.listen_event_handle_list = []
self.listen_state_handle_list = []
self.app_switch = globals.get_arg(self.args,"app_switch")
self.sensor = globals.get_arg(self.args,"sensor")
self.alternative_name = globals.get_arg(self.args,"alternative_name")
self.notify_name = globals.get_arg(self.args,"notify_name")
self.delay = globals.get_arg(self.args,"delay")
self.threshold = globals.get_arg(self.args,"threshold")
self.message = globals.get_arg(self.args,"message_DE")
self.message_off = globals.get_arg(self.args,"message_off_DE")
self.triggered = False
self.isWaitingHandle = None
self.notifier = self.get_app('Notifier')
# Subscribe to sensors
self.listen_state_handle_list.append(self.listen_state(self.state_change, self.sensor))
def state_change(self, entity, attribute, old, new, kwargs):
if self.get_state(self.app_switch) == "on":
# Initial: power usage goes up
if ( new != None and new != "" and not self.triggered and float(new) > self.threshold ):
self.triggered = True
self.log("Power Usage is: {}".format(float(new)))
self.log("Setting triggered to: {}".format(self.triggered))
self.notifier.notify(self.notify_name, self.message.format(self.alternative_name))
# Power usage goes down below threshold
elif ( new != None and new != "" and self.triggered and self.isWaitingHandle == None and float(new) <= self.threshold):
self.log("Waiting: {} seconds to notify.".format(self.delay))
self.isWaitingHandle = self.run_in(self.notify_device_off,self.delay)
self.log("Setting isWaitingHandle to: {}".format(self.isWaitingHandle))
self.timer_handle_list.append(self.isWaitingHandle)
# Power usage goes up before delay
elif( new != None and new != "" and self.triggered and self.isWaitingHandle != None and float(new) > self.threshold):
self.log("Cancelling timer")
self.cancel_timer(self.isWaitingHandle)
self.isWaitingHandle = None
self.log("Setting isWaitingHandle to: {}".format(self.isWaitingHandle))
def notify_device_off(self, kwargs):
"""Notify User that device is off. This may get cancelled if it turns on again in the meantime"""
self.triggered = False
self.log("Setting triggered to: {}".format(self.triggered))
self.isWaitingHandle = None
self.log("Setting isWaitingHandle to: {}".format(self.isWaitingHandle))
self.log("Notifying user")
self.notifier.notify(self.notify_name, self.message_off.format(self.alternative_name))
def terminate(self):
for timer_handle in self.timer_handle_list:
self.cancel_timer(timer_handle)
for listen_event_handle in self.listen_event_handle_list:
self.cancel_listen_event(listen_event_handle)
for listen_state_handle in self.listen_state_handle_list:
self.cancel_listen_state(listen_state_handle) | [
"k.eifinger@googlemail.com"
] | k.eifinger@googlemail.com |
de440b2a397df3a824c6c85058fa980aa28a0340 | 52083499c5661e9034758135bf1ee2e625a9d2fb | /src/surround_view_sim/build/catkin_generated/generate_cached_setup.py | ca755387a7ec711615f8b35eb5609d860ad867bb | [] | no_license | pennluo/SurroundView_Simulation | 12d186545a5b53a32c47e8cfb1f1bfd3cc1f946f | ae7d98caf9c89325bcf776f119e9fd655cac8ebb | refs/heads/master | 2022-09-30T12:05:24.610092 | 2019-04-14T15:59:55 | 2019-04-14T15:59:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/opt/ros/kinetic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/reuben/catkin_ws/src/Surround_View_Sim/src/surround_view_sim/build/devel/env.sh')
output_filename = '/home/reuben/catkin_ws/src/Surround_View_Sim/src/surround_view_sim/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"luopengchn@gmail.com"
] | luopengchn@gmail.com |
ca4715ede55c40f5896201b4076cd2db446a2b25 | 33f87a7b8bf317894bff46d1f57dce5aea363b77 | /Unit_3/Prog_10.py | a827d6fbf1eaabadf8c7d56ec8cfd2b16f0d7647 | [] | no_license | beulah444/Dr.AIT_Python_Course_2015 | dd7f31659591225393bb81a4918d9a7df09e5f27 | 9fe4dd1fe0cbc093795901f8f272afe25adccef4 | refs/heads/master | 2021-01-22T17:28:51.383517 | 2016-06-10T19:37:57 | 2016-06-10T19:37:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | __author__ = 'Dr.S.Gowrishankar'
class ListSum:
def __init__(self, listArg):
self.listArg = listArg
def __init__(self, listArg):
self.listArg = listArg
def ComputeSum(self):
print sum(self.listArg)
#a =[1,2,3,4,5]
#ls = ListSum(a) #no need to write these two lines
ls = ListSum([1,2,3,4,5]) # you can directly pass the list values
ls.ComputeSum()
| [
"learndatasciencewithr@gmail.com"
] | learndatasciencewithr@gmail.com |
f617f398edfe0e8e5b141797e5bcd020e521010e | 925c667f928d8a08a22cbb6c0bffbd5264a4db85 | /primerProyectoDjango/firtPro/firtPro/asgi.py | e210908218fc3e14599a75b10486c36247c84939 | [] | no_license | jesusManuelJuarez/jesusManuelJuarez | 62dd5770791e8c08942fe753a1cead687db3ab7a | c74ed3734ccd2a4e4ee8c5672cccd2102b54d5af | refs/heads/master | 2022-04-11T12:23:59.928223 | 2020-04-02T22:32:43 | 2020-04-02T22:32:43 | 250,353,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
ASGI config for firtPro project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'firtPro.settings')
application = get_asgi_application()
| [
"noreply@github.com"
] | jesusManuelJuarez.noreply@github.com |
c4281a41c161ba65c8915083ae81b981745630ca | 9775ab319e5c1f2270a132b0244f0847db42589b | /nilai/migrations/0008_auto_20210117_1010.py | d2abe2075f8f24b61525b7b5c136dcc1bf54b97d | [] | no_license | nabaman/SPK-SAW | 9aa8dfaf1bf5162bae1dc5c97e2b3e033a08294b | 5c0b8d491f23939615aa968cd52f081072fe2230 | refs/heads/master | 2023-02-18T17:38:21.028901 | 2021-01-22T15:37:06 | 2021-01-22T15:37:06 | 331,987,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | # Generated by Django 3.1.5 on 2021-01-17 10:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nilai', '0007_auto_20210117_1004'),
]
operations = [
migrations.RemoveField(
model_name='data_krips',
name='kriteria',
),
migrations.AddField(
model_name='data_kriteria',
name='krips',
field=models.ManyToManyField(to='nilai.Data_Krips'),
),
]
| [
"naba.alvian@gmail.com"
] | naba.alvian@gmail.com |
5db6fa1f99c5b7ac65079c7fd585ce8c7915f235 | 817085c4009e48db05e4a30815fdd92ee27513f9 | /venv/Scripts/pip-script.py | 87639661c7805bc7bbf60fa04e4b53e33d5922f8 | [] | no_license | bluesnie/novel | 7e3a2f403def8fe3e1d9c8c1ba4e2a80344c39e0 | c11076ca61c619a2b7c1423d742d3f4c63dc1fed | refs/heads/master | 2020-04-24T02:07:07.516575 | 2019-02-20T07:44:19 | 2019-02-20T07:44:19 | 171,486,867 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | #!C:\Users\lenovo\PycharmProjects\novel\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"l"
] | l |
b155ef0630a0290fc9d2d59511401268ad789fe4 | ec3ec2e292da450b095c8a4963a8aeb222d141bf | /model/DAO/ClienteDAO.py | 4349684129a4f5fa88ae857c388fd1c13621fc68 | [] | no_license | ojuliomiguel/Gerenciador-de-Fiados | b3aa97d8b65e595445f2598fc9e1bae1df289430 | da239a8de12d30bf5cd284322086fdffcfc9f851 | refs/heads/master | 2021-10-28T00:38:53.399247 | 2019-04-20T17:02:04 | 2019-04-20T17:02:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,480 | py | import sqlite3
import os
from DataBase.ConexaoSQL import ConexaoSQL
class ClienteDAO:
def buscarCliente(nome):
try:
con = ConexaoSQL.conexaoBd()
cur = con.cursor()
query = """
SELECT id, nome
FROM clientes
WHERE nome = '{}'
""".format(nome)
cur.execute(query)
data = cur.fetchone()
return data[1]
except TypeError:
print('Retorno da funcao buscarCliente() vazio')
def cadastrarCliente(cliente):
con = ConexaoSQL.conexaoBd()
cur = con.cursor()
query = """
INSERT INTO clientes (nome)
VALUES ('{}')
""".format(cliente.Nome)
cur.execute(query)
con.commit()
print('Dados cadastrados com sucesso')
def excluirCliente(nome):
con = ConexaoSQL.conexaoBd()
cur = con.cursor()
query = """
DELETE
FROM clientes
WHERE nome = '{}'
""".format(nome)
cur.execute(query)
con.commit()
def listarClientes():
try:
con = ConexaoSQL.conexaoBd()
cur = con.cursor()
query = """
SELECT id, nome
FROM clientes
"""
cur.execute(query)
data = cur.fetchall()
return data
except TypeError:
print('Retorno da funcao buscarCliente() vazio')
| [
"juliomiguelsouzacosta@gmail.com"
] | juliomiguelsouzacosta@gmail.com |
f2542cb91db283b5aae1bcebc74918ad123999bd | a78aff707dbaf053c78ead5f4631fc3ba0f0611b | /Code/data.py | eb210da3c8715ef8d4061df88a1b36dd87efea14 | [] | no_license | GitteVW/Multi-Instance-Experiments | b6ac3eed60df3ee8938dc3f96696c0924f3d98bb | 47de57f56c4b407765d9355dd97a4f27d2f0ba07 | refs/heads/master | 2020-05-16T21:27:36.589189 | 2015-01-26T21:16:29 | 2015-01-26T21:16:29 | 28,873,499 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,689 | py | '''
Datasets and algorithms
'''
labels_sival=['ajaxorange','apple','banana','bluescrunge','dirtyworkgloves','juliespot','checkeredscarf','wd40can','candlewithholder','glazedwoodpot','cokecan','smileyfacedoll','dataminingbook','rapbook','feltflowerrug','translucentbowl','greenteabox','cardboardbox','dirtyrunningshoe','largespoon','goldmedal','spritecan','stripednotebook','fabricsoftenerbox','woodrollingpin']
labels_text=['alt_atheism','comp_graphics','comp_os_ms-windows_misc','comp_sys_ibm_pc_hardware','comp_sys_mac_hardware','comp_windows_x','misc_forsale','rec_autos','rec_motorcycles',\
'rec_sport_baseball','rec_sport_hockey','sci_crypt','sci_electronics','sci_med','sci_space','soc_religion_christian','talk_politics_guns','talk_politics_mideast','talk_politics_misc','talk_religion_misc']
labels_uci=['adult_12', 'adult_13', 'adult_23', 'adult_14', 'adult_24', 'adult_15', 'adult_25', 'adult_110', 'adult_210','diabetes_12', 'diabetes_13', 'diabetes_23','spam_12', 'spam_13', 'spam_23', 'spam_14', 'spam_24', 'spam_15', 'spam_25', 'spam_110', 'spam_210','tictactoe_12', 'tictactoe_13', 'tictactoe_23', 'transfusion_12', 'transfusion_13', 'transfusion_23']
datasets={'uci':labels_uci,'text':labels_text,'sival':labels_sival}
milearners=['MILR','AdaBoostM1','MILRC','MIRI','MIDD','MDD','MIOptimalBall','TLD','MIWrapper','CitationKNN','MISMO','SimpleMI','MIEMDD','MISVM']
silearners=['AdaBoostM1','RBFNetwork', 'J48','SMO','IBk','Logistic']
learnerTuples=[('SimpleMI','J48'),('AdaBoostM1','AdaBoostM1'),('MIWrapper','J48'),('CitationKNN','IBk'),('MILR','Logistic'),('MISMO','SMO')]
def createFeatureList(label,datasetType):
"""
Create a list with each item being the description of one single-instance feature (arff format) for a given label and dataset type (SIVAL, text, UCI).
"""
def createFeature(i):
return '\t@attribute f'+str(i)+': numeric\n'
if datasetType=='sival':
featureList = list(map(createFeature, range(30)))
if datasetType =='text':
featureList = list(map(createFeature, range(200)))
if datasetType =='uci':
if label.split('_')[0]=='transfusion':
featureList = list(map(createFeature, range(4)))
if label.split('_')[0]=='tictactoe':
featureList = list(map(createFeature, range(9)))
if label.split('_')[0]=='spam':
featureList = list(map(createFeature, range(57)))
if label.split('_')[0]=='adult':
featureList = list(map(createFeature, range(14)))
if label.split('_')[0]=='diabetes': # pima indians
featureList = list(map(createFeature, range(8)))
return featureList
| [
"gitte.vanwinckelen@gmail.com"
] | gitte.vanwinckelen@gmail.com |
db0098a5052813ec73014ee3786d77074ce4c3cf | a0727e9880bd4cbd575b1ead8404fac2a076ae7e | /src/tests/api/test_rol.py | 1c6f99376508b971e3ca4eab9191f0341534da56 | [
"MIT"
] | permissive | Haythem122/fastapi-docker | 57620631e18310df8aa2e8a27909249ffc52bf22 | 90b84161599fa23225649aef8a0532a571d350fe | refs/heads/master | 2023-08-12T10:58:43.601587 | 2021-09-20T23:11:47 | 2021-09-20T23:11:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,980 | py |
def test_create_role(client, super_user_token):
data = {
'name': 'new_role'
}
response = client.post(
'/api/v1/role', json=data, headers={
'token': super_user_token
}
)
assert response.status_code == 201, response.text
response_json = response.json()
expected_data = {'name': 'new_role', 'id': 3}
assert response_json == expected_data
def test_get_role(client, super_user_token):
response = client.get(
'/api/v1/role/1', headers={
'token': super_user_token
}
)
assert response.status_code == 200, response.text
response_json = response.json()
expected_data = {'name': 'ADMINISTRATOR', 'id': 1}
assert response_json == expected_data
def test_get_roles(client, super_user_token):
response = client.get(
'/api/v1/roles?page=1', headers={
'token': super_user_token
}
)
assert response.status_code == 200, response.text
response_json = response.json()
expected_data = {
'previous_page': None, 'next_page': None, 'total': 2,
'pages': 1, 'data': [
{'name': 'ADMINISTRATOR', 'id': 1},
{'name': 'BASIC', 'id': 2}
]
}
assert response_json == expected_data
def test_update_role(client, super_user_token):
data = {'name': 'new_name', 'id': 2}
response = client.put(
'/api/v1/role', json=data,
headers={
'token': super_user_token
}
)
assert response.status_code == 200, response.text
response_json = response.json()
assert response_json == data
def test_delete_role(client, super_user_token):
response = client.delete(
'/api/v1/role/2',
headers={
'token': super_user_token
}
)
assert response.status_code == 200, response.text
response_json = response.json()
expected_data = {'name': 'BASIC', 'id': 2}
assert response_json == expected_data
| [
"jeremysilvasilva@gmail.com"
] | jeremysilvasilva@gmail.com |
a643d38e90646191463eca1bc229387c66c1a11f | 65e0c11d690b32c832b943fb43a4206739ddf733 | /bsdradius/trunk/bsdradius/configDefaults.py | 244f2e4ef1d3488677ee5ad1c6d9c71ef18e43ac | [
"BSD-3-Clause"
] | permissive | Cloudxtreme/bsdradius | b5100062ed75c3201d179e190fd89770d8934aee | 69dba67e27215dce49875e94a7eedbbdf77bc784 | refs/heads/master | 2021-05-28T16:50:14.711056 | 2015-04-30T11:54:17 | 2015-04-30T11:54:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,442 | py | ## BSDRadius is released under BSD license.
## Copyright (c) 2006, DATA TECH LABS
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
## * Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
## * Neither the name of the DATA TECH LABS nor the names of its contributors
## may be used to endorse or promote products derived from this software without
## specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
## ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
## ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Define configuration defaults here
"""
# HeadURL $HeadURL: file:///Z:/backup/svn/bsdradius/trunk/bsdradius/configDefaults.py $
# Author: $Author: valts $
# File version: $Revision: 278 $
# Last changes: $Date: 2006-11-26 15:45:52 +0200 (Sv, 26 Nov 2006) $
prefix = '/usr/local'
# define default values
# format: {'section' : {'option' : value}}
defaultOptions = {
'PATHS' : {
'prefix' : prefix,
'conf_dir' : '%(prefix)s/etc/bsdradius',
'run_dir' : '%(prefix)s/var/run',
'log_dir' : '%(prefix)s/var/log/bsdradius',
'user_module_dir' : '%(conf_dir)s/user_modules',
'dictionary_dir' : '%(prefix)s/share/bsdradius/dictionaries',
'dictionary_file' : '%(dictionary_dir)s/dictionary',
'server_log_file' : '%(log_dir)s/bsdradiusd.log',
'pid_file' : '%(run_dir)s/bsdradiusd.pid',
'clients_file' : '%(conf_dir)s/clients.conf',
'modules_file' : '%(conf_dir)s/modules.conf',
'user_modules_file' : '%(conf_dir)s/user_modules.conf',
'config_file' : '%(conf_dir)s/bsdradiusd.conf'
},
'SERVER' : {
'home' : '',
'user' : '',
'group' : '',
'auth_port' : '1812',
'acct_port' : '1813',
'number_of_threads' : '10',
'foreground' : 'no',
'no_threads' : 'no',
'log_to_screen': 'no',
'log_to_file' : 'no',
'debug_mode' : 'no',
'log_client' : '',
'fast_accounting': 'no',
},
'DATABASE' : {
'enable' : 'no',
'type' : 'postgresql',
'host' : 'localhost',
'user' : 'bsdradius',
'pass' : '',
'name' : 'bsdradius',
'refresh_rate' : '60',
'clients_query' : 'select address, name, secret from radiusClients',
},
'AUTHORIZATION' : {
'packet_timeout' : '5',
'auth_queue_maxlength' : '300',
'modules' : '',
},
'ACCOUNTING' : {
'acct_queue_maxlength' : '300',
'modules' : '',
},
}
# Define option types.
# It is really neccessary to define only other types
# than string because Config parser converts everything
# to string by default.
# Format: {'section' : {'option' : 'type'}}
defaultTypes = {
'SERVER' : {
'auth_port' : 'int',
'acct_port' : 'int',
'number_of_threads' : 'int',
'foreground' : 'bool',
'no_threads' : 'bool',
'log_to_screen': 'bool',
'log_to_file': 'bool',
'debug_mode' : 'bool',
'fast_accounting': 'bool',
},
'DATABASE' : {
'enable' : 'bool',
'refresh_rate' : 'int',
},
'AUTHORIZATION' : {
'packet_timeout' : 'int',
'auth_queue_maxlength' : 'int',
},
'ACCOUNTING' : {
'acct_queue_maxlength' : 'int',
},
}
# configuration defaults for one BSD Radius module
moduleConfigDefaults = {
'enable': 'yes',
'configfile': '',
'startup_module': '',
'startup_function': '',
'authorization_module': '',
'authorization_function': '',
'authentication_module': '',
'authentication_function': '',
'accounting_module': '',
'accounting_function': '',
'shutdown_module': '',
'shutdown_function': '',
'pythonpath' : '',
}
| [
"valdiic@72071c86-a5be-11dd-a5cd-697bfd0a0cef"
] | valdiic@72071c86-a5be-11dd-a5cd-697bfd0a0cef |
5bf1f054acecf712806195e5754388d8d6ab6e98 | 2cabf2ef7640f6203ce6c95ee638793c806c2354 | /venv/Scripts/pip3-script.py | ed3e0c46993f2f30d9fd3af434f3e4ea7ba7c22b | [] | no_license | nunodores/suziapp | 4288347e4436981704fa80ae1416a2a486919fe7 | 43e33369ed64e0fe8eec6e94cc29809fa75dcb65 | refs/heads/master | 2021-07-20T08:34:26.340345 | 2018-12-01T22:28:58 | 2018-12-01T22:28:58 | 159,992,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | #!"C:\Users\Nuno Dorres\PycharmProjects\Test\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"41326@etu.he2b.be"
] | 41326@etu.he2b.be |
02247885ad972b4756997bffe5c07f2ebd394d4a | 908b6ee862375003eac9af6c8ea5b32533be4709 | /collective/phantasy/atphantasy/content/phantasyschema.py | 1842cf48d3c9871472abac28e8375078dccb4886 | [] | no_license | collective/collective.phantasy | c7199995d98be183eb163f5b73c3e89147f2e8a6 | 86cb66b0905f6bb284cfc03201dd5bbfa8f9010e | refs/heads/master | 2023-08-19T13:18:13.507960 | 2015-01-19T15:32:07 | 2015-01-19T15:32:07 | 29,517,296 | 0 | 0 | null | 2015-01-20T07:24:00 | 2015-01-20T07:24:00 | null | UTF-8 | Python | false | false | 37,998 | py | from Products.Archetypes.public import *
from collective.phantasy.config import I18N_DOMAIN
from Products.SmartColorWidget.Widget import SmartColorWidget
from Products.ATContentTypes.configuration import zconf
from Products.Archetypes.atapi import AnnotationStorage
from Products.validation.config import validation
from Products.validation.validators.SupplValidators import MaxSizeValidator
from Products.validation import V_REQUIRED
validation.register(MaxSizeValidator('checkImageMaxSize',
maxsize=zconf.ATImage.max_file_size))
from collective.phantasy import phantasyMessageFactory as _
try:
from iw.fss.FileSystemStorage import FileSystemStorage
HAS_FSS = True
except :
HAS_FSS = False
try:
from Products.FCKeditor.FckWidget import FckWidget
HAS_FCKWIDGET = True
except:
HAS_FCKWIDGET = False
PRESERVED_SCHEMATAS = ['default', 'images', 'dimensions', 'colors', 'fonts', 'borders', 'plone-overloads', 'viewlets', 'dynamic-viewlets']
CUSTOM_TOOL_BAR = """[
['Source','Preview','-','Templates'],
['Cut','Copy','Paste','PasteText','RemoveFormat'],
['Bold','Italic','Underline','StrikeThrough','-','Subscript','Superscript'],
['OrderedList','UnorderedList','-','Outdent','Indent'],
['Link','Unlink','Anchor','Image','imgmapPopup','Flash'],
['Style','FontFormat'],
['FitWindow']
]"""
def finalizePhantasySchema(schema):
"""Finalizes schema to alter some fields
"""
# Id must be valid and make description invisible
schema['id'].validators = ('isValidId',)
schema['description'].widget.visible = {'view':'invisible', 'edit':'invisible'}
# FSS Storage for skin screenshot if iw.fss is available
if HAS_FSS :
schema['screenshot'].storage = FileSystemStorage()
for fieldName in schema.keys() :
if schema[fieldName].schemata not in PRESERVED_SCHEMATAS :
# hide ATCTFolder metadata fields unuseful for skins
schema[fieldName].widget.visible = {'view':'invisible', 'edit':'invisible'}
# FCKWidget for viewlet fields if FCK is available
if HAS_FCKWIDGET and schema[fieldName].schemata == 'viewlets' :
schema[fieldName].widget = FckWidget (
description = schema[fieldName].widget.description,
label = schema[fieldName].widget.label,
rows=12,
width = '100%',
height ='150px',
fck_toolbar = 'Custom',
fck_custom_toolbar = CUSTOM_TOOL_BAR,
file_portal_type = 'PhantasySkinFile',
image_portal_type = 'PhantasySkinImage',
browse_images_portal_types = ['PhantasySkinImage', 'Image'],
fck_force_other_path_method = 'get_phantasy_relative_path',
fck_force_other_root_method = 'get_phantasy_relative_path',
# force no paragraphs in viewlets
keyboard_entermode = 'div',
allow_link_byuid = False,
start_expanded = True,
allow_file_upload = False)
if fieldName == 'logoViewlet' :
css_id = 'portal-logo'
elif fieldName == 'footerViewlet' :
css_id = 'portal-footer'
elif fieldName == 'colophonViewlet' :
css_id = 'portal-colophon'
schema[fieldName].widget.fck_area_css_id = css_id
schema[fieldName].widget.fck_area_css_class = ''
# Make a copy to reinitialize all layers
new_schema = schema.copy()
return new_schema
# in skin schema fields with same name as standard plone base_properties must always be required
PhantasyFieldsSchema = Schema((
StringField(
'cssfile',
schemata ='default',
widget=StringWidget(
description = _(u'description_css_file', u"""Enter a stylesheet file name, don't forget to upload the file in this skin.
This css will be applied at the end (after all properties). Use './myimage.jpg' in this css
to reference an image called 'myimage.jpg' from this skin."""),
label = _(u'label_css_file', u'Css File Name'),
),
),
ImageField(
'screenshot',
required=False,
primary=False,
languageIndependent=True,
storage = AnnotationStorage(migrate=True),
swallowResizeExceptions = zconf.swallowImageResizeExceptions.enable,
pil_quality = zconf.pil_config.quality,
pil_resize_algo = zconf.pil_config.resize_algo,
max_size = zconf.ATImage.max_image_dimension,
sizes= {'large' : (768, 768),
'preview' : (400, 400),
'mini' : (200, 200),
'thumb' : (128, 128),
'tile' : (64, 64),
'icon' : (32, 32),
'listing' : (16, 16),
},
validators = (('checkImageMaxSize', V_REQUIRED)),
widget = ImageWidget(
description = _(u'description_phantasy_screenshot',
default=u'Upload a screen Shot for this skin, used to help users to select a skin'),
label= _(u'label_phantasy_screenshot', default=u'Screen Shot'),
show_content_type = False,
preview_scale = 'mini',
),
),
# fields for viewlets overrides
TextField('logoViewlet',
schemata ='viewlets',
required=False,
searchable=False,
validators = ('isTidyHtmlWithCleanup',),
allowable_content_types = ('text/html',),
default_content_type = 'text/html',
default_output_type = 'text/x-html-safe',
widget = RichWidget(
description = _(u'description_logo_viewlet', u"""Override the logo viewlet,
you can add images or links with rich editor"""),
label = _(u'label_logo_viewlet', u'Logo Viewlet'),
rows = 25,
allow_file_upload = False),
),
TextField('footerViewlet',
schemata ='viewlets',
required=False,
searchable=False,
validators = ('isTidyHtmlWithCleanup',),
allowable_content_types = ('text/html',),
default_content_type = 'text/html',
default_output_type = 'text/x-html-safe',
widget = RichWidget(
description = _(u'description_footer_viewlet', u"""Override the footer viewlet,
you can add images or links with rich editor"""),
label = _(u'label_footer_viewlet', u'Footer Viewlet'),
rows = 25,
allow_file_upload = False),
),
TextField('colophonViewlet',
schemata ='viewlets',
required=False,
searchable=False,
validators = ('isTidyHtmlWithCleanup',),
allowable_content_types = ('text/html',),
default_content_type = 'text/html',
default_output_type = 'text/x-html-safe',
widget = RichWidget(
description = _(u'description_colophon_viewlet', u"""Override the colophon viewlet,
you can add images or links with rich editor"""),
label = _(u'label_colophon_viewlet', u'Colophon Viewlet'),
i18n_domain = I18N_DOMAIN,
rows = 25,
allow_file_upload = False),
),
BooleanField(
'displaySearchBoxViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_searchbox_viewlet',
u"""Do you want to display the searchbox viewlet with live search in header ?"""),
label = _(u'label_display_searchbox_viewlet', u'Display Searchbox ?'),
),
),
BooleanField(
'displayBreadCrumbsViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_breadcrumbs_viewlet',
u"""Do you want to display the breadcrumbs viewlet in top of content ?"""),
label = _(u'label_display_breadcrumbs_viewlet', u'Display Bread Crumbs ?'),
),
),
BooleanField(
'displayGlobalSectionsViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_globalsections_viewlet',
u"""Do you want to display the global sections viewlet (horizontal navigation at top) ?"""),
label = _(u'label_display_globalsections_viewlet', u'Display Global Sections ?'),
),
),
BooleanField(
'displayPersonalBarViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_personalbar_viewlet',
u"""Do you want to display the personal bar viewlet (links : login, preferences ...) ?"""),
label = _(u'label_display_personalbar_viewlet', u'Display Personal Bar ?'),
),
),
BooleanField(
'displaySiteActionsViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_siteactions_viewlet',
u"""Do you want to display the site actions viewlet (links : site map, contact ...) ?"""),
label = _(u'label_display_siteactions_viewlet', u'Display Site Actions ?'),
),
),
BooleanField(
'displayDocumentActionsViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_documentactions_viewlet',
u"""Do you want to display the document actions viewlet (link: print, send this page ...) ?"""),
label = _(u'label_display_documentactions_viewlet', u'Display Document Actions ?'),
),
),
BooleanField(
'displayDocumentBylineViewlet',
schemata ='dynamic-viewlets',
default = True,
widget=BooleanWidget(
description = _(u'description_display_documentbyline_viewlet',
u"""Do you want to display the document by line viewlet for each content (author, date and keywords) ?"""),
label = _(u'label_display_documentbyline_viewlet', u'Display Document By Line ?'),
),
),
# fields for images
# logoName property is no more used in standard plone css
# so we make it invisible
StringField(
'logoName',
schemata ='images',
required=1,
widget=StringWidget(
label='Logo Name',
visible = {'view':'invisible', 'edit':'invisible'},
description = "Choose the logo file name, upload the image in the skin to overload it",
i18n_domain = I18N_DOMAIN,
),
),
StringField(
'backgroundImageName',
schemata ='images',
widget=StringWidget(
description = _(u'description_background_image_name', u"""Enter the background image name for the page, upload the image in this skin"""),
label = _(u'label_background_image_name', u'Background Image Name'),
),
),
StringField(
'backgroundImagePosition',
schemata ='images',
default="top left",
vocabulary = [("top left", _(u"Top Left")),
("top right", _(u"Top Right")),
("top center", _(u"Top Center")),
("center left", _(u"Center Left")),
("center right", _(u"Center Right")),
("center center", _(u"Center Center")),
("bottom left", _(u"Bottom Left")),
("bottom right", _(u"Bottom Right")),
("bottom center", _(u"Bottom Center"))],
widget=SelectionWidget(
description = _(u'description_background_image_position', u"""Choose the background image position for the page"""),
label = _(u'label_background_image_position', u'Background Image Position'),
format='select',
),
),
StringField(
'backgroundImageRepeat',
schemata ='images',
default="no-repeat",
vocabulary = [("no-repeat", "No repeat"),
("repeat-x", "Horizontal Repeat"),
("repeat-y", "Vertical Repeat"),
("repeat", "mosaic repeat")],
widget=SelectionWidget(
description = _(u'description_background_image_repeat', u"""Choose the background image repeat for the page"""),
label = _(u'label_background_image_repeat', u'Background Image Repeat'),
format='select',
),
),
StringField(
'portalBackgroundImageName',
schemata ='images',
widget=StringWidget(
description = _(u'description_portal_background_image_name', u"""Enter the background image name for the portal, upload the image in this skin"""),
label = _(u'label_portal_background_image_name', u'Portal Background Image Name'),
),
),
StringField(
'contentBackgroundImageName',
schemata ='images',
widget=StringWidget(
description = _(u'description_content_background_image_name', u"""Choose the background image name for the content, upload the image in this skin"""),
label = _(u'label_contentl_background_image_name', u'Content Background Image Name'),
),
),
StringField(
'headerBackgroundImageName',
schemata ='images',
widget=StringWidget(
description = _(u'description_header_background_image_name', u"""Choose the background image name for the header, upload the image in this skin"""),
label = _(u'label_header_background_image_name', u'Header Background Image Name'),
),
),
# this property is never used is standard plone css
# so we make it invisible
StringField(
'portalMinWidth',
schemata ='dimensions',
widget=StringWidget(
label='Portal min width',
visible = {'view':'invisible', 'edit':'invisible'},
description = "Choose the portal min width in px em or %",
),
),
StringField(
'portalWidth',
schemata ='dimensions',
default = '100%',
widget=StringWidget(
description = _(u'description_portal_width', u"""Choose the portal min width in px em or %"""),
label = _(u'label_portal_width', u'Portal width'),
),
),
StringField(
'portalHorizontalPosition',
schemata ='dimensions',
default="",
vocabulary = [("0", _(u"undefined")),
("0 auto 0 auto", _(u"centered")),
("0 auto 0 0", _(u"on left")),
("0 0 0 auto", _(u"on right"))],
widget=SelectionWidget(
description = _(u'description_portal_horizontal_position', u"""Choose the position for portal"""),
label = _(u'label_portal_horizontal_position', u'Portal Horizontal Position'),
format='select',
),
),
StringField(
'columnOneWidth',
schemata ='dimensions',
required=1,
widget=StringWidget(
description = _(u'description_column_one_width', u"""Choose the column one width in px em or %"""),
label = _(u'label_column_one_width', u'Column One width'),
),
),
StringField(
'columnTwoWidth',
schemata ='dimensions',
required=1,
widget=StringWidget(
description = _(u'description_column_two_width', u"""Choose the column two width in px em or %"""),
label = _(u'label_column_two_width', u'Column Two width'),
),
),
StringField(
'fontFamily',
schemata ='fonts',
required=1,
widget=StringWidget(
description = _(u'description_font_family',
u"""Choose the font family"""),
label = _(u'label_font_family', u'Font Family'),
),
),
StringField(
'fontMainSize',
schemata ='fonts',
required=0,
widget=StringWidget(
description = _(u'description_font_main_size',
u"Choose the main font size in % (better) em px pt "
u"or using a keyword (xx-small, small, ...)"),
label = _(u'label_font_main_size', u'Font Main Size'),
),
),
StringField(
'fontSmallSize',
schemata ='fonts',
required=1,
widget=StringWidget(
description = _(u'description_font_small_size',
u"Choose the small font size in % (better) em px pt "
u"or using a keyword (xx-small, small, ...)"""),
label = _(u'label_font_small_size', u'Font Small Size'),
),
),
StringField(
'headingFontFamily',
schemata ='fonts',
required=1,
widget=StringWidget(
description = _(u'description_heading_font_family',
u"""Choose the font family for titles"""),
label = _(u'label_heading_font_family', u'Heading Font Family'),
),
),
StringField(
'textTransform',
schemata ='fonts',
required=1,
vocabulary = [("none", _(u"none")),
("uppercase", _(u"uppercase")),
("lowercase", _(u"lowercase")),
("capitalize", _(u"capitalize"))],
widget=SelectionWidget(
description = _(u'description_text_transform',
u"""Choose the text transformation for tabs and some headings"""),
label = _(u'label_text_transform', u'Text Transform'),
format='select',
),
),
StringField(
'fontColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_font_color',
u"""Choose the font color"""),
label = _(u'label_font_color', u'Font Color'),
),
),
StringField(
'backgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_background_color',
u"""Choose the background color of the page"""),
label = _(u'label_background_color', u'Background Color'),
),
),
StringField(
'discreetColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_discreet_color',
u"""Choose the discreet color (can be used in content) """),
label = _(u'label_discreet_color', u'Discreet Color'),
),
),
StringField(
'portalBackgroundColor',
schemata ='colors',
default="transparent",
widget=SmartColorWidget(
description = _(u'description_portal_background_color',
u"""Choose the portal background color"""),
label = _(u'label_portal_background_color', u'Portal Background Color'),
),
),
StringField(
'contentBackgroundColor',
schemata ='colors',
default="transparent",
widget=SmartColorWidget(
description = _(u'description_content_background_color',
u"""Choose background color for content part of the page"""),
label = _(u'label_content_background_color', u'Content Background Color'),
),
),
StringField(
'personaltoolsBackgroundColor',
schemata ='colors',
default="#E3E3E3",
widget=SmartColorWidget(
description = _(u'description_personaltools_background_color',
u"""Choose background color for personal tools - language choice and user menu"""),
label = _(u'label_personaltools_background_color',
u"Personal tools Background Color"),
),
),
StringField(
'personaltoolsFontColor',
schemata ='colors',
default="#205C90",
widget=SmartColorWidget(
description = _(u'description_personaltools_font_color',
u"""Choose font color for personal tools - language choice and user menu"""),
label = _(u'label_personaltools_font_color',
u"Personal tools Font Color"),
),
),
StringField(
'headerBackgroundColor',
schemata ='colors',
default="transparent",
widget=SmartColorWidget(
description = _(u'description_header_background_color',
u"""Choose background color for the header"""),
label = _(u'label_header_background_color', u"Header Background Color"),
),
),
StringField(
'globalNavBackgroundColor',
schemata ='colors',
default="#dee7ec",
widget=SmartColorWidget(
description = _(u'description_global_nav_background_color',
u"""Choose the background color of global navigation"""),
label = _(u'label_global_nav_background_color', u'Global navigation Background Color'),
),
),
StringField(
'globalNavLinkColor',
schemata ='colors',
default="#205c90",
widget=SmartColorWidget(
description = _(u'description_global_nav_font_color',
u"""Choose the color of font and selected element background in global navigation"""),
label = _(u'label_global_nav_font_color', u'Global navigation Font Color'),
),
),
StringField(
'inputFontColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_input_font_color',
u"""Choose the input fields font color"""),
label = _(u'label_input_font_color', u'Input Font Color'),
),
),
StringField(
'linkColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_link_color',
u"""Choose the color for links"""),
label = _(u'label_link_color', u'Link Color'),
),
),
StringField(
'linkVisitedColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_link_visited_color',
u"""Choose the color for visited links"""),
label = _(u'label_link_visited_color', u'Link Visited Color'),
),
),
StringField(
'linkActiveColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_link_active_color',
u"""Choose the color for active links"""),
label = _(u'label_link_active_color', u'Link Active/Hover Color'),
),
),
StringField(
'notifyBackgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_notify_background_color',
u"""Choose the notify background color (for portal messages)"""),
label = _(u'label_notify_background_color', u'Notify Background Color'),
),
),
StringField(
'notifyBorderColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_notify_border_color',
u"""Choose the notify border color"""),
label = _(u'label_notify_border_color', u'Notify Border Color'),
),
),
StringField(
'helpBackgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_help_background_color',
u"""Choose the bg color for help in forms"""),
label = _(u'label_help_background_color', u'Help Background Color'),
),
),
StringField(
'oddRowBackgroundColor',
schemata ='colors',
required=1,
default="#EEEEEE",
widget=SmartColorWidget(
description = _(u'description_odd_row_background_color',
u"""Choose the bg color for odd rows (tables, portlets)"""),
label = _(u'label__odd_row_background_color', u'Odd Row Background Color'),
),
),
StringField(
'evenRowBackgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_even_row_background_color',
u"""Choose the bg color for even rows (tables, portlets)"""),
label = _(u'label__even_row_background_color', u'Even Row Background Color'),
),
),
StringField(
'globalBackgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_global_background_color',
u"""Choose the global background color (used in tabs and portlets headers)"""),
label = _(u'label_global_background_color', u'Global Background Color'),
),
),
StringField(
'globalFontColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_global_font_color',
u"""Choose the global font color"""),
label = _(u'label_global_font_color', u'Global Font Color'),
),
),
StringField(
'globalBorderColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_global_border_color',
u"""Choose the color for global borders"""),
label = _(u'label_global_border_color', u'Global Border Color'),
),
),
StringField(
'contentViewBackgroundColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_content_views_background_color',
u"""Choose the background color for content views tabs"""),
label = _(u'label_content_views_background_color', u'Content View Background Color'),
),
),
StringField(
'contentViewBorderColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_content_views_border_color',
u"""Choose the border color for content views tabs"""),
label = _(u'label_content_views_border_color', u'Content View Border Color'),
),
),
StringField(
'contentViewFontColor',
schemata ='colors',
required=1,
widget=SmartColorWidget(
description = _(u'description_content_views_font_color',
u"""Choose the font color for content views tabs"""),
label = _(u'label_content_views_font_color', u'Content View Font Color'),
),
),
StringField(
'listingHeadersFontColor',
schemata ='colors',
required=1,
default="#666666",
widget=SmartColorWidget(
description = _(u'description_listing_headers_font_color',
u"""Choose the font color for the text of listing headers"""),
label = _(u'label_listing_headers_font_color', u'Listing Headers Font Color'),
),
),
StringField(
'portletHeadersFontColor',
schemata ='colors',
required=1,
default="#000000",
widget=SmartColorWidget(
description = _(u'description_portlet_headers_font_color',
u"""Choose the font color for the text of portlet headers"""),
label = _(u'label_portlet_headers_font_color', u'Portlet Headers Font Color'),
),
),
StringField(
'borderStyle',
schemata ='borders',
required=1,
vocabulary = [("none", "no border"),
("hidden", "hidden when none is impossible (tables)"),
("solid", "solid"),
("dotted", "dotted"),
("dashed", "dashed"),
("groove","3D groove"),
("double", "double borders"),
("inset", "3D inset"),
("outset","3D outset"),
("ridge","3D ridge")],
widget=SelectionWidget(
description = _(u'description_border_style',
u"""Choose the global border style"""),
label = _(u'label_border_style', u'Border Style'),
format='select',
),
),
StringField(
'borderStyleAnnotations',
schemata ='borders',
required=1,
vocabulary = [("none", "no border"),
("hidden", "hidden when none is impossible (tables)"),
("solid", "solid"),
("dotted", "dotted"),
("dashed", "dashed"),
("groove","3D groove"),
("double", "double borders"),
("inset", "3D inset"),
("outset","3D outset"),
("ridge","3D ridge")],
widget=SelectionWidget(
description = _(u'description_border_style_annotations',
u"""Choose the border style for annotations """),
label = _(u'label_border_style_annotations', u'Border Style for Annotations'),
format='select',
),
),
StringField(
'borderWidth',
schemata ='borders',
required=1,
widget=StringWidget(
description = _(u'description_border_width',
u"""Choose the border width in px"""),
label = _(u'label_border_width', u'Border Width'),
),
),
BooleanField(
'overloadBody',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_body',
u"""Do you want to overload the body style ?"""),
label = _(u'label_overload_body', u'Overload Body Style'),
),
),
BooleanField(
'overloadHTMLTags',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_html_tags',
u"""Do you want to overload content styles (classic html tags) ?"""),
label = _(u'label_overload_html_tags', u'Overload HTML Tags Styles'),
),
),
BooleanField(
'overloadContent',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_content',
u"""Do you want to overload standard plone styles used for content ?"""),
label = _(u'label_overload_content', u'Overload Various Content Styles'),
),
),
BooleanField(
'overloadSiteActions',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_site_actions',
u"""Do you want to overload site actions styles ?"""),
label = _(u'label_overload_site_actions', u'Overload Site Actions Styles'),
),
),
BooleanField(
'overloadSearchBox',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_search_box',
u"""Do you want to overload search box styles ?"""),
label = _(u'label_overload_search_box', u'Overload Search Box Styles'),
),
),
BooleanField(
'overloadGlobalSections',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_global_sections',
u"""Do you want to overload global sections buttons styles ?"""),
label = _(u'label_overload_global_sections', u'Overload Global Sections Styles'),
),
),
BooleanField(
'overloadPersonalTools',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_personal_tools',
u"""Do you want to overload personal tools buttons styles (login, preferences ...) ?"""),
label = _(u'label_overload_personal_tools', u'Overload Personals Tools Styles'),
),
),
BooleanField(
'overloadBreadcrumbs',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_breadcrumbs',
u"""Do you want to overload breadcrumbs styles ?"""),
label = _(u'label_overload_breadcrumbs', u'Overload Breadcrumbs Styles'),
),
),
BooleanField(
'overloadFooter',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_footer',
u"""Do you want to overload footer styles ?"""),
label = _(u'label_overload_footer', u'Overload Footer Styles'),
),
),
BooleanField(
'overloadSiteMap',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_site_map',
u"""Do you want to overload site map styles ?"""),
label = _(u'label_overload_site_map', u'Overload Site Map Styles'),
),
),
BooleanField(
'overloadColumns',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_columns',
u"""Do you want to overload columns styles ?"""),
label = _(u'label_overload_columns', u'Overload Columns Styles'),
),
),
BooleanField(
'overloadForms',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_forms',
u"""Do you want to overload forms styles ?"""),
label = _(u'label_overload_forms', u'Overload Forms Styles'),
),
),
BooleanField(
'overloadPortlets',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_portlets',
u"""Do you want to overload portlets styles ?"""),
label = _(u'label_overload_portlets', u'Overload Portlets Styles'),
),
),
BooleanField(
'overloadCalendar',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_calendar',
u"""Do you want to overload calendar styles ?"""),
label = _(u'label_overload_calendar', u'Overload Calendar Styles'),
),
),
BooleanField(
'overloadNavtree',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_navtree',
u"""Do you want to overload navigation tree styles (impact sitemap + navtree portlet) ?"""),
label = _(u'label_overload_navtree', u'Overload Navigation Tree Styles'),
),
),
BooleanField(
'overloadAuthoring',
schemata ='plone-overloads',
default = True,
widget=BooleanWidget(
description = _(u'description_overload_authoring',
u"""Do you want to overload authoring styles (content views, actions etc ...) ?"""),
label = _(u'label_overload_authoring', u'Overload Authoring Styles'),
),
),
), marshall=RFC822Marshaller())
| [
"thomas.desvenain@gmail.com"
] | thomas.desvenain@gmail.com |
a842ae5ed2fa9404270a2b872f3c9f04a42ac434 | 2652fd6261631794535589427a384693365a585e | /trunk/workspace/Squish/src/TestScript/UI/suite_UI_51/tst_UI_51_Pref_BufferAutoView/test.py | e1a9d9fe2212331ae4697f3a3269cdded8842a9c | [] | no_license | ptqatester1/ptqa | 88c652380167f64a953bfd7a65041e7d8ac48c90 | 5b5997ea459e9aac17db8da2041e2af331927104 | refs/heads/master | 2021-01-21T19:06:49.275364 | 2017-06-19T03:15:00 | 2017-06-19T03:15:00 | 92,115,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,735 | py | from API.Utility.Util import Util
from API.Utility import UtilConst
from API.MenuBar.Options.Options import Options
from API.MenuBar.Options.OptionsConst import OptionsConst
from API.MenuBar.Options.Preferences.Miscellaneous.MiscellaneousConst import MiscellaneousConst
from API.SimulationPanel.EventList.EventListConst import EventListConst
from API.SimulationPanel.EventListFilters.EventListFilters import EventListFilters
from API.SimulationPanel.PlayControls.PlayControlsConst import PlayControlsConst
from API.MenuBar.Options.Preferences.PreferencesConst import PreferencesConst
util = Util()
options = Options()
eventListFilters = EventListFilters()
def main():
    """Entry point: open the project, enable the buffer preference, verify it, restore it.

    Squish drives Packet Tracer here; every step is an ordered GUI action.
    """
    util.init()
    util.open("UI13.pkt", UtilConst.UI_TEST )
    util.speedUpConvergence()
    editOptionsSetting()
    checkpoint1()
    resetOptionsSetting()
def editOptionsSetting():
    """Turn on 'Auto View Previous Events' under Preferences > Miscellaneous."""
    options.selectOptionsItem(OptionsConst.PREFERENCES)
    util.clickTab(PreferencesConst.TAB_BAR, PreferencesConst.MISCELLANEOUS)
    util.clickButton(MiscellaneousConst.AUTO_VIEW_PREVIOUS_EVENTS)
    util.close(OptionsConst.OPTIONS_DIALOG)
def checkpoint1():
    """Capture simulation events and verify the 'buffer full' dialog never appears."""
    util.clickOnSimulation()
    util.clickButton(EventListConst.RESET_SIMULATION)
    # Capture forward 8 times, pausing 10s each time so events accumulate.
    for i in range(0, 8):
        util.clickButton(PlayControlsConst.CAPTURE_FORWARD)
        snooze(10)
    # NOTE(review): indentation was lost in this copy; the verdict is placed
    # once after all captures -- confirm it should not run inside the loop.
    if (object.exists(PlayControlsConst.BUFFER_FULL_DIALOG_LABEL)):
        test.fail("Buffer window found")
    else:
        test.passes("Buffer window not found")
def resetOptionsSetting():
    """Switch the preference back to 'Prompt' (presumably the default -- confirm)."""
    options.selectOptionsItem(OptionsConst.PREFERENCES)
    util.clickTab(PreferencesConst.TAB_BAR, PreferencesConst.MISCELLANEOUS)
    util.clickButton(MiscellaneousConst.PROMPT)
    util.close(OptionsConst.OPTIONS_DIALOG)
"ptqatester1@gmail.com"
] | ptqatester1@gmail.com |
509c23e3bf72658ffd093ae405cf9de4958fb78f | 102d09ef1d6effe166ad703ba4472c45dfb03263 | /py/Maximum_Depth_of_Binary_Tree.py | 199982277744c0985b39cfc2326fc115a739fec4 | [] | no_license | bitcsdby/Codes-for-leetcode | 5693100d4b66de65d7f135bbdd81b32650aed7d0 | 9e24e621cfb9e7fd46f9f02dfc40a18a702d4990 | refs/heads/master | 2016-09-05T08:43:31.656437 | 2014-08-02T15:14:53 | 2014-08-02T15:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path.

        An empty tree has depth 0. Any object exposing `.left` and `.right`
        (e.g. the TreeNode described above) is accepted.

        O(n) time, O(height) recursion depth.
        """
        # `is None` instead of the original `== None`: identity is the correct
        # idiom and is immune to custom __eq__ implementations.
        if root is None:
            return 0
        return 1 + max(self.maxDepth(root.left), self.maxDepth(root.right))
| [
"bitcsdby@gmail.com"
] | bitcsdby@gmail.com |
d0089bd15b2c1ffac1e167de02e3ee215da07c7b | 74698be74d244ebbabcb0b3cf17ebed26adfa37c | /orbit/utils/epoch_helper.py | 6eb110768887e95055c34f7fc3857f08a6b9c276 | [
"Apache-2.0"
] | permissive | lfads/models | aa75616fee2476641aa98ca1cbdce7e5d27a9aff | fd700f0cb2e104544c445d9fbf3991d8388ff18a | refs/heads/master | 2021-01-25T13:50:55.423010 | 2021-01-05T18:27:01 | 2021-01-05T18:27:01 | 123,619,512 | 16 | 9 | Apache-2.0 | 2021-01-05T18:27:02 | 2018-03-02T19:07:50 | Python | UTF-8 | Python | false | false | 2,136 | py | # Copyright 2020 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a utility class for training in epochs."""
import tensorflow as tf
class EpochHelper:
  """Bookkeeping helper that tracks epoch boundaries in custom training loops."""

  def __init__(self, epoch_steps: int, global_step: tf.Variable):
    """Creates the helper.

    Args:
      epoch_steps: Number of steps that make up one epoch.
      global_step: A `tf.Variable` holding the current global step.
    """
    self._epoch_steps = epoch_steps
    self._global_step = global_step
    self._current_epoch = None
    self._epoch_start_step = None
    self._in_epoch = False

  def epoch_begin(self):
    """Opens a new epoch and returns True, unless one is already running."""
    if not self._in_epoch:
      step = self._global_step.numpy()
      self._epoch_start_step = step
      self._current_epoch = step // self._epoch_steps
      self._in_epoch = True
      return True
    return False

  def epoch_end(self):
    """Returns True (and closes the epoch) once the step count crosses it."""
    if not self._in_epoch:
      raise ValueError("`epoch_end` can only be called inside an epoch.")
    crossed = self._global_step.numpy() // self._epoch_steps > self._current_epoch
    if crossed:
      self._in_epoch = False
    return crossed

  @property
  def batch_index(self):
    """Index of the next batch within the current epoch."""
    return self._global_step.numpy() - self._epoch_start_step

  @property
  def current_epoch(self):
    """Epoch number recorded by the most recent `epoch_begin` call."""
    return self._current_epoch
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
97381c7e465346939150233934c4f52ea147926a | 1c6c127b7f22ad7c19b2cc0f265973711d6e3e01 | /bus/migrations/0002_auto_20200227_1737.py | 917f3db6cdb94721f19e9baa1456db73cb3805ac | [] | no_license | ramsheedrd/BusRoute | 1e0e7f94653c5fe6c6318dfe8079ed6b8f1b4c51 | bd61d37d16171777a28797195ff387b3fee57cde | refs/heads/master | 2022-04-16T09:44:07.774609 | 2020-04-05T12:31:21 | 2020-04-05T12:31:21 | 253,231,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | # Generated by Django 2.0.3 on 2020-02-27 12:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create ``DistrictModel`` and attach every place to a district."""

    dependencies = [
        ('bus', '0001_initial'),
    ]

    operations = [
        # New district lookup table; created/modified timestamps mirror the
        # project's base-model fields (options show it was abstract-derived).
        migrations.CreateModel(
            name='DistrictModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('modified_date', models.DateTimeField(auto_now=True)),
                ('district', models.CharField(max_length=50)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Non-null FK from PlacesModel to DistrictModel; deleting a district
        # cascades to its places. preserve_default=False: any default used
        # during the migration is not kept on the model.
        migrations.AddField(
            model_name='placesmodel',
            name='district',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bus.DistrictModel'),
            preserve_default=False,
        ),
    ]
| [
"ramsheedkc10@gmail.com"
] | ramsheedkc10@gmail.com |
9d3339fbb1dea2a21cc7e3817513b96bac97ad71 | b03121e88c7c7b4738d6ab873cf030e7db140e32 | /utils/fact_memoization.py | 47630368952b26b22dc1746de79f0c20b8aab043 | [] | no_license | priestd09/project_euler | 0889a2038ee4ff17008169abea73f8a3bc74f713 | d6a04fbe42947ef0f9d9e26077c2b9b99069f4d1 | refs/heads/master | 2021-01-18T19:42:00.135047 | 2014-05-25T09:39:57 | 2014-05-25T09:39:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | #!/usr/bin/env python
'''
Created on 17 feb. 2012
@author: Julien Lengrand-Lambert
@contact: julien@lengrand.fr
'''
import timeit
from memoize import memoize
from Memoized import Memoized
def fact1(value):
    """
    Returns value! -- plain recursion, no caching (baseline for the benchmark).
    """
    return 1 if value == 0 else value * fact1(value - 1)
@memoize
def fact2(value):
    """
    Returns value!, cached by the function-based `memoize` decorator.

    Recursive calls go through the decorated name, so every intermediate
    factorial is cached as well.
    """
    return 1 if value == 0 else value * fact2(value - 1)
@Memoized
def fact3(value):
    """
    Returns value!, cached by the class-based `Memoized` decorator.

    Recursive calls go through the decorated name, so every intermediate
    factorial is cached as well.
    """
    return 1 if value == 0 else value * fact3(value - 1)
if __name__ == '__main__':
    # Benchmark the three variants three times; after the first round the
    # memoized versions should answer from their caches.
    for i in range(3):
        t1 = timeit.Timer("fact1(150)", "from __main__ import fact1")
        t2 = timeit.Timer("fact2(150)", "from __main__ import fact2")
        t3 = timeit.Timer("fact3(150)", "from __main__ import fact3")
        # NOTE: Python 2 print statement -- this module targets Python 2.
        print t1.timeit(1), t2.timeit(1), t3.timeit(1)
"julien@lengrand.fr"
] | julien@lengrand.fr |
b6d15f2f18092727dbf5772e4baf476f305ac60b | 5487607b21cf32d2a7664a8d28b5fd68ad434739 | /tysdgx_landinspector.py | 8415b2e51eb903369edd96af6ffddc5be39afd3d | [] | no_license | Fr0z3nKnights/landcloud | 081cfb29f8e2f1b3fb484f37108a86c1855bcb1c | f65b413bd616bc4b9ca641aefc5a1db0cee40cb5 | refs/heads/master | 2021-04-05T00:03:04.774268 | 2020-03-23T06:55:19 | 2020-03-23T06:55:19 | 248,504,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,168 | py | """
@author:Fr0z3n
@contact:websec@yeah.net
@datetime:2020/3/16
@desc:To get json data from url(https://landcloud.org.cn),and distribute personal task.
"""
from openpyxl import Workbook
from openpyxl import load_workbook
import requests
import os
import datetime
import re
# 屏蔽SSL报错,要求重新安装requests==2.7.0(version)
from requests.packages import urllib3
urllib3.disable_warnings()
class Tysdgx(object):
    """Scrapes supervision-parcel (tuban) records from landcloud.org.cn and
    distributes inspection tasks for them.

    Workflow: landlogin() (interactive captcha login, yields a cross-domain
    token) -> rtv_data() (sync the parcel list into a local Excel file) ->
    begin_assignment() (push the parcels listed in the assign file to the
    task-distribution API in batches of 50).
    """

    def __init__(self, pagecnt=52, assignfile="./tysdgx_data/assign.txt"):
        # 52 pages in total, 100 records per page
        self.pagecnt = pagecnt
        self.vlist_url = "https://jg.landcloud.org.cn:8090/webapi/api/vlist"
        self.assign_url = "https://jg.landcloud.org.cn:8090/webapi/api//TaskDistribute"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0",
            "Authorization": "",  # token cannot be null; filled in by landlogin()
            "Origin": "https://jg.landcloud.org.cn:8090",
            "Referer": "https://jg.landcloud.org.cn:8090/main/list/tysdgx/1/0",
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
            "Accept-Encoding": "gzip, deflate, br",
            "Content-Type": "application/json;charset=utf-8",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
        }
        self.cookies = {"482b4337498e47ae9e76f0e2271caf89": "WyIzODA1NTY1NTEwIl0"}
        # POST body template for the parcel-list endpoint; pageIndex is
        # rewritten per page in rtv_data().
        self.data = {
            "ywlx": "tysdgx",
            "xzqdm": "410922",
            "level": 1,
            "userxzqdm": "410922",
            "pageSize": 100,
            "pageIndex": 1,
            "order": "",
            "isUseCacheCount": True
        }
        # Per-parcel payload template used by begin_assignment().
        self.tbbsm_li = {
            "tbbsm": "",
            "xmbh": "",
            "xzqdm": "410922"
        }
        # Task-distribution payload; tbbsms is filled in batches of 50.
        self.taskdata = {
            "tbbsms": [],
            "xzqdm": "410922",
            "ywlx": "TYSDGX",
            "sbbsms": [614864, 615084, 526084, 550264, 555304, 555284, 555264, 555504, 555604, 556624, 555324, 556264]
        }
        # Login credentials; verifyCode is filled interactively in landlogin().
        self.userdata = {
            "username": "",
            "password": "",
            "verifyCode": ""
        }
        self.maxjctb = 5161          # expected total number of parcels (rows) online
        self.update_flag = True      # True: incremental update of the local Excel file
        self.assign_name = assignfile    # text file listing parcel numbers to distribute
        self.datapath = "./tysdgx_data/TYSDGX_ALL_JCTB.xlsx"  # local Excel "database"
        self.auth = ''               # cross-domain token captured by landlogin()

    # Sign in interactively and obtain the cross-domain API token.
    def landlogin(self):
        """Log in to landcloud.org.cn (manual captcha entry) and return the
        cross-domain bearer token, or False when the index page is down.

        NOTE(review): on intermediate failures (login rejected, token request
        failing) the method falls through and implicitly returns None; the
        caller in rtv_data() concatenates the result into a header string and
        would then raise TypeError -- confirm intended.
        """
        if not os.path.exists("./tysdgx_data"):
            os.mkdir("./tysdgx_data")
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0"
        }
        host_url = 'https://landcloud.org.cn'
        se = requests.session()
        # Fetch the index page so the session picks up its cookies.
        index = se.get(url=host_url + "/index", headers=headers, verify=False)
        print("[ %s ]++网络状态:" % self.hms_now(), index.status_code)
        if index.status_code == 200:
            # , index.url, index.headers)
            # print(response.content)
            # dic = self.cookiejar2dic(se.cookies)
            # print(dic)
            # Fetch the login page.
            getlogin = se.get(url=host_url + "/login", cookies=se.cookies, verify=False)
            print("[ %s ]++获取登录页面状态:getlogin status:" % self.hms_now(), getlogin.status_code)
            patten = "<img id=\"verifycodeImage\"\s+src=\"(.*?)\"/>"
            # Extract the captcha image URL from the login page.
            match = re.findall(pattern=patten, string=getlogin.text)
            print("[ %s ]++匹配到验证码链接:" % self.hms_now(), match[0])
            veryimg_url = host_url + match[0]
            dic2 = self.cookiejar2dic(se.cookies)
            # print(veryimg_url)
            print("[ %s ]++登录页面获得cookies:" % self.hms_now(), dic2)
            # Download the captcha image.
            get_verify_img = se.get(url=veryimg_url, cookies=se.cookies, headers=headers, verify=False)
            print("[ %s ]++获取验证码链接图片:" % self.hms_now(), get_verify_img.status_code)
            # print(get_verify_img.text)
            dic3 = self.cookiejar2dic(se.cookies)
            vfpath = "./tysdgx_data/%s.png" % (datetime.datetime.now().strftime("%y%m%d-%H%M%S") + "_" + dic3['timestamp'])
            # Write the captcha image to disk so the operator can read it.
            with open(vfpath, "wb") as fp:
                fp.write(get_verify_img.content)
            print("[ %s ]++更新页面获得cookies:" % self.hms_now(), dic3)
            # Ask the operator to type the captcha, then submit the login form.
            verifycode = input("[ %s ]++Please input the code you have identified:" % self.hms_now())
            verifycode = verifycode.strip()
            self.userdata['verifyCode'] = verifycode
            if os.path.exists(vfpath):  # remove the generated captcha .png
                os.remove(vfpath)
                print("[ %s ]++文件路径:%s" % (self.hms_now(), vfpath), "删除成功!")
            signin_url = host_url + "/login/login.action"
            getsignin = se.post(url=signin_url, cookies=se.cookies, params=self.userdata, headers=headers, verify=False)
            # print(getsignin.url)
            if getsignin.status_code == 200 and getsignin.json()['status'] == 'OK':
                print("[ %s ]++登录页面返回信息:" % self.hms_now(), getsignin.json())
                # Request the cross-domain page URL; its query string carries the
                # token used as the Authorization header on jg.landcloud.org.cn.
                auth_url = host_url + "/third/proxy/getListDetailPageUrl?ywlx=TYSDGX&type=1&xzqdm=410922"
                get_auth = se.get(url=auth_url, cookies=se.cookies, headers=headers, verify=False)
                if get_auth.status_code == 200 and get_auth.json()['status'] == 'OK':
                    self.auth = re.findall(r"token=(.*?)$", get_auth.json()['data'])
                    print("[ %s ]++获取跨域token:" % self.hms_now(), self.auth)
                    return self.auth[0]
        # NOTE(review): indentation reconstructed -- this `else` is assumed to
        # pair with the index status-code check above; confirm against the
        # original file.
        else:
            return False

    # cookiejar object to dictionary
    def cookiejar2dic(self, cookies):
        """Convert a requests CookieJar into a plain dict."""
        return requests.utils.dict_from_cookiejar(cookies)

    # retrieve data from server
    def rtv_data(self):
        """Download all parcel pages and store them in the local Excel file.

        Skips entirely when the local file already holds maxjctb rows.
        NOTE(review): in the incremental path a *fresh* Workbook is created and
        saved over the old file while rows already known are skipped -- as
        written, previously saved rows are lost. Confirm against the original.
        """
        # Run the login flow first; the returned token authorizes the API calls.
        self.headers['Authorization'] = "bearer " + self.landlogin()
        print("[ %s ]++开始获取来自网络的数据并更新:" % self.hms_now())
        filepath = self.datapath
        jcbh2tbbsm = {}
        # Does the local Excel "database" exist already?
        if os.path.exists(filepath):
            wb = load_workbook(filepath)
            ws = wb.active
            # Is an update needed at all? (+1 accounts for the header row)
            if self.maxjctb + 1 <= ws.max_row:
                print("[ %s ]++Excel中最大行数为%s,不需要更新数据!" % (self.hms_now(), ws.max_row))
                wb.close()
                return
            else:
                self.update_flag = True
                a = 1
                for row in ws.iter_rows():
                    if a == 1:
                        # skip the header row
                        a += 1
                        continue
                    else:
                        # after load_workbook, cells are 0-indexed within the
                        # row tuple and row[i].value holds the cell value
                        # print("+第%s行" % a, row[1].value, row[2].value)
                        jcbh2tbbsm[row[2].value] = row[1].value
                        a += 1
                # print(jcbh2tbbsm)
                print("[ %s ]网络数据大于本地数据,开始更新数据到本地:" % self.hms_now())
        else:
            self.update_flag = False
            print("[ %s ]本地不存在excel数据,开始创建数据并更新,请注意检查数据页数!" % self.hms_now())
        wb = Workbook()
        # activate the worksheet
        ws = wb.active
        # worksheet rename
        ws.title = "TYSDGX_all_jctb"  # + datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        # insert into worksheet by one row.
        # Column order of the Excel sheet.
        ws.append(["bsm", "tbbsm", "jcbh", "xzb", "yzb", "tblx", "wyzt"])
        i = 1
        for j in range(1, self.pagecnt + 1):
            self.data['pageIndex'] = j
            r = requests.post(url=self.vlist_url, headers=self.headers,
                              json=self.data, cookies=self.cookies, verify=False)
            # print(r.cookies)
            # check if the back status is 200, and retrieve the json data.
            if r.status_code == 200:
                jdata = r.json()
                # count = jdata['data']['allcount'] # int,the count number of all the records.
                record = jdata['data']['records']  # list,and each one in list was a dict.
                for li in record:
                    tbbsm = li['tbbsm']  # parcel identifier
                    jcbh = li['jcbh']  # inspection number
                    tblx = li['tblx']
                    bsm = li['bsm']
                    xzb = li['xzb']
                    yzb = li['yzb']
                    wyzt = li['wyzt']
                    if self.update_flag:
                        # incremental mode: skip rows whose jcbh is already local
                        if jcbh in jcbh2tbbsm.keys():
                            i += 1
                            continue
                        else:
                            ws.append([bsm, tbbsm, jcbh, xzb, yzb, tblx, wyzt])
                            print("[ %s ]+添加第%s行" % (self.hms_now(), i), bsm, tbbsm, jcbh, xzb, yzb, tblx, wyzt)
                            i += 1
                    else:
                        ws.append([bsm, tbbsm, jcbh, xzb, yzb, tblx, wyzt])
                        print("[ %s ]+第%s行" % (self.hms_now(), i), bsm, tbbsm, jcbh, xzb, yzb, tblx, wyzt)
                        i += 1
            else:
                print("[ %s ]+请求数据状态码:%s,退出!" % (self.hms_now(), r.status_code))
        wb.save(filepath)
        wb.close()
        print("[ %s ]+文件保存成功!位于%s." % (self.hms_now(), filepath))

    # get nowtime in H:M:S
    def hms_now(self):
        """Current local time formatted as HH:MM:SS (used for log prefixes)."""
        return datetime.datetime.now().strftime("%H:%M:%S")

    # get jctb from .txt
    def data_matching(self):
        """Match the parcel numbers in the assign file against the local Excel
        data and return {jcbh: tbbsm} for the ones found.

        Returns False when the assign file or local database is missing, or
        when some requested parcels cannot be matched (those are written to a
        notfound*.txt file).
        """
        print("[ %s ]++开始处理分发任务:" % self.hms_now())
        print("[ %s ]++检查任务分发文件是否存在重复图斑?" % self.hms_now())
        assignfilepath = self.assign_name
        all_tb = {}
        toassign = {}  # parcels to distribute: jcbh -> tbbsm
        notfound = []  # requested parcels not present in the local data
        # Does the assignment file exist?
        if os.path.exists(assignfilepath):
            with open(assignfilepath, "r") as fp:
                assign_tb = fp.readlines()
                # print(assign_tb)
                # NOTE(review): this loop discards the stripped value, so the
                # strip is repeated during matching below -- looks redundant.
                for tb in assign_tb:
                    tb = tb.strip()
        else:
            print("[ %s ]++不存在相关分配任务文件!" % self.hms_now())
            return False
        # Detect and collapse duplicate parcel numbers in the assignment file.
        if len(set(assign_tb)) == len(assign_tb):
            print("[ %s ]++检查结果:任务分发文件图斑无重复项." % self.hms_now())
        else:
            print("[ %s ]**********检查结果:分发任务文件图斑存在重复项,请检查!" % self.hms_now())
            assign_tb = set(assign_tb)
            print("[ %s ]++经处理后图斑列表为:!" % (self.hms_now()), assign_tb)
        # Does the local Excel database exist?
        if os.path.exists(self.datapath):
            wb = load_workbook(self.datapath)
            ws = wb.active
            b = 1
            for row in ws.iter_rows():
                if b == 1:
                    # skip the header row
                    b += 1
                    continue
                else:
                    all_tb[row[2].value] = row[1].value
                    b += 1
            wb.close()
            print("[ %s ]++读取本地数据库长度:%s." % (self.hms_now(), len(all_tb)))
            # Check that every requested parcel exists in the local data.
            for tb in assign_tb:
                tb = tb.strip()
                if tb in all_tb.keys():
                    toassign[tb] = all_tb[tb]
                else:
                    print("[ %s ]+图斑%s未找到!" % (self.hms_now(), tb))
                    notfound.append(tb)
            # Handle the matching result.
            if len(assign_tb) == len(toassign):
                # print("[ %s ]++匹配到图斑字典长度:" % self.hms_now(, toassign)
                print("[ %s ]++匹配到全部要分发图斑共%s个,开始分发任务:" % (self.hms_now(), len(toassign)))
            else:
                print("[ %s ]**********未全部匹配到图斑数!" % self.hms_now())
                nffilepath = "./tysdgx_data/notfound" + datetime.datetime.now().strftime("%y%m%d_%H%M%S") + ".txt"
                with open(nffilepath, "w") as fp:
                    fp.writelines(notfound)
                print("[ %s ]**********未匹配图斑文件位于:%s!" % (self.hms_now(), nffilepath))
                return False
        else:
            print("[ %s ]**********不存在本地数据库文件%s,请检查!" % (self.hms_now(), self.datapath))
            return False
        # Parcels to distribute are in toassign: key = jcbh, value = tbbsm.
        return toassign

    # Distribute the matched parcels to the mobile side.
    def begin_assignment(self):
        """Send the matched parcels to the TaskDistribute API in waves of 50.

        NOTE(review): the same self.tbbsm_li dict object is appended for every
        parcel, so each wave ends up holding N references to the *last*
        mutation -- this looks like a bug; confirm whether the server copies
        the payload or each parcel needs its own dict.
        """
        # Match the data first.
        assigndata = self.data_matching()
        c = 1
        flag = 1
        if assigndata:
            # remain = (number of full 50-parcel waves, size of the final wave)
            remain = divmod(len(assigndata), 50)
            for tb in assigndata.keys():
                self.tbbsm_li['xmbh'] = tb
                self.tbbsm_li['tbbsm'] = assigndata[tb]
                self.taskdata['tbbsms'].append(self.tbbsm_li)
                if (remain[0] == 0 and c == remain[1]) or (remain[0] < flag and divmod(c, 50)[1] == remain[1]):
                    # print(self.taskdata)
                    # Final (partial) wave: POST it and stop.
                    res = requests.post(url=self.assign_url, json=self.taskdata, cookies=self.cookies, headers=self.headers, verify=False)
                    jdata = res.json()
                    print("[ {0} ]++第{1}波网络返回数据:{2},code:{3},error:{4},message:{5}!".format(self.hms_now(), flag, jdata['data'], jdata['code'], jdata['error'], jdata['message']))
                    return
                elif remain[0] >= flag and divmod(c, 50)[1] == 0:
                    # Full wave of 50: POST it, clear the batch, continue.
                    res = requests.post(url=self.assign_url, json=self.taskdata, cookies=self.cookies, headers=self.headers, verify=False)
                    jdata = res.json()
                    self.taskdata['tbbsms'] = []
                    print("[ {0} ]++第{1}波50个网络返回数据:{2},code:{3},error:{4},message:{5}!".format(self.hms_now(), flag, jdata['data'], jdata['code'], jdata['error'], jdata['message']))
                    flag += 1
                    continue
                c += 1
        else:
            print("[ %s ]**********匹配分发任务出现问题,请检查后重新运行!" % self.hms_now())
if __name__ == '__main__':
    # Full run: interactive login (captcha), sync the parcel list to Excel,
    # then distribute the parcels listed in ./tysdgx_data/assign.txt.
    tysd = Tysdgx()
    tysd.rtv_data()
    tysd.begin_assignment()
| [
"webhack007@outlook.com"
] | webhack007@outlook.com |
98fc62623666acf20345eb356467064870a96c88 | a7265d9cff8f2a06257faa4155655524ff6047c0 | /utils/findAll_keyNouns.py | 6fb229482e25c2d901662b491f039080ed01b2d8 | [] | no_license | szr712/pinyin2hanziTest | f285afa7c12e548063f279d523303f43cf81687d | 2bd4d197d514b85e43ad525be07a8fc6bef3ad23 | refs/heads/master | 2023-07-17T18:52:54.519509 | 2021-08-25T13:03:44 | 2021-08-25T13:03:44 | 392,251,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py |
def findAll_keyNouns(pinyin, tones, subpinyin, subtones):
    """Locate every occurrence of a keyword's pinyin inside a pinyin string.

    A position counts as a match only when both the pinyin substring and the
    corresponding tone slice equal the keyword's.

    Args:
        pinyin (str): pinyin string to search in
        tones (list): tone values aligned with `pinyin`
        subpinyin (str): keyword pinyin to search for
        subtones (list): tone values aligned with `subpinyin`

    Returns:
        list: start index of every match
    """
    width = len(subpinyin)
    return [start
            for start in range(len(pinyin) - width + 1)
            if pinyin[start:start + width] == subpinyin
            and tones[start:start + width] == subtones]
if __name__ == "__main__":
    # Ad-hoc demo: convert a sentence and a keyword to pinyin with the
    # project's Convertor, then locate the keyword inside the sentence and
    # print the matched characters.
    import sys
    sys.path.append("..")
    from Convertor import Convertor
    words = "【知识点】『互斥事件的概率加法公式互斥事件的概率加法公式』"
    convertor = Convertor()
    tar, sen = convertor.convert(words)
    sub, _ = convertor.convert("公式")
    print(sen)
    index = findAll_keyNouns(
        tar[0].pinyin, tar[0].yindiao, sub[0].pinyin, sub[0].yindiao)
    print(index)
    # Slice out the two characters of the first match for visual confirmation.
    print(sen[0][index[0]:index[0]+2])
| [
"zirui990712@163.com"
] | zirui990712@163.com |
7df058d88c766a9b978227fca727c0f1587d9861 | 2e1322f72f730fdb019c25eb533424bfb411c7dc | /backend/garpix_page/contexts/default.py | 635931bdc671efeb46a46e470ae8ab5dc4d06058 | [
"MIT"
def context(request, *args, **kwargs):
    """Default page context builder: contributes no extra template variables."""
    return dict()
| [
"crusat@yandex.ru"
] | crusat@yandex.ru |
c575267f57e1c42a783b5c2275174b4427573cab | 9db916f28e718d4684db7e67a05573365a182e0b | /base/migrations/0002_freeresponseanswer_answer.py | c4be8ee654e41e8dfcf550bb36d6976aa5ae5b09 | [] | no_license | shobashiva/questionnaire_app | 243fee34950141315a9eb05ea4c26b3b677660ed | bd6c1c51215215c531944e99b29363ea87e98b62 | refs/heads/master | 2021-07-11T03:37:19.980030 | 2017-10-16T13:02:45 | 2017-10-16T13:02:45 | 107,127,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the free-text ``answer`` column to ``FreeResponseAnswer``."""

    dependencies = [
        ('base', '0001_initial'),
    ]

    operations = [
        # Nullable and blank so existing rows need no backfill and forms may
        # leave the answer empty.
        migrations.AddField(
            model_name='freeresponseanswer',
            name='answer',
            field=models.TextField(null=True, blank=True),
        ),
    ]
| [
"shoba@apaxsoftware.com"
] | shoba@apaxsoftware.com |
d3341770a701c50c59469677b0108cffaf063137 | b6f892264a425278e84a01d12abb0e0082bddb1d | /orders/views.py | ad43e233d08f7352ea3703b89514611153bd17ae | [] | no_license | omar-bendary/Bookstore | 5059e264acc16389930cd640773171819fc8853c | 714630213bd29fd82ba9b4cf89089ca346c705be | refs/heads/master | 2022-12-31T18:14:49.860836 | 2020-10-25T23:39:17 | 2020-10-25T23:39:17 | 294,221,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | import stripe
from django.views.generic.base import TemplateView
from django.conf import settings
from django.shortcuts import render
stripe.api_key = settings.STRIPE_TEST_SECRET_KEY
class OrdersPageView(TemplateView):
    """Render the purchase page, exposing the Stripe publishable key to it."""

    template_name = 'orders/purchase.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['stripe_key'] = settings.STRIPE_TEST_PUBLISHABLE_KEY
        return ctx
def charge(request):
    """On POST, charge the card via Stripe, then render the confirmation page.

    NOTE(review): the confirmation template is also rendered for GET requests
    without any charge being made -- confirm that is intended.
    """
    if request.method == 'POST':
        # amount=3900 is in the smallest currency unit (i.e. $39.00 per
        # Stripe's convention); source is the token produced by Stripe
        # Checkout on the purchase page. (Local `charge` shadows this
        # function's name and is otherwise unused.)
        charge = stripe.Charge.create(
            amount=3900,
            currency='usd',
            description='Purchase all books',
            source=request.POST['stripeToken']
        )
    return render(request, 'orders/charge.html')
| [
"omarbendary@Omars-MacBook-Pro.local"
] | omarbendary@Omars-MacBook-Pro.local |
3a8f47db15b6d38c675be76ddcac8ebc077745ca | d2ad678fd648f516f3eaa4ee8d13825d3e07983d | /src/lib/Server.py | edcc7d53d75b5a62dfbcf0424aaa04936fd657da | [] | no_license | marcusagm/python-socketserver | 4affcda01e88b6f298486335f810e2624e25a139 | d4e6559716cead5c3f64acdb64f261d9fe8c32e0 | refs/heads/master | 2022-11-24T20:51:27.213763 | 2020-08-05T18:57:28 | 2020-08-05T18:57:28 | 285,364,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,044 | py | import time
import queue
import errno
import socket
import select
import threading
class Server:
    """UDP relay server.

    Accepts datagrams on (address, port), remembers every peer that has sent
    data, forwards each received packet to all other known peers, answers
    '::ping' with '::pong', and periodically drops peers whose last ping is
    older than `timeout` seconds.
    """

    def __init__(self, address = '127.0.0.1', port = 3000, bufferSize = 8192, timeout = 300):
        self.address = address
        self.port = int(port)
        self.bufferSize = bufferSize          # max datagram size passed to recvfrom()
        self.timeout = timeout                # seconds of ping silence before a client is dropped
        self.clients = []                     # (host, port) tuples seen so far
        self.lastPings = {}                   # str(client) -> time.time() of last ping
        self.receivedPackets = queue.Queue()  # packets waiting to be relayed
        self.lastTimeoutCheck = time.time()
        self.timeoutTimer = None              # one-shot threading.Timer driving checkForTimeouts
        self.context = None                   # the UDP socket, created in start()
        self.listener = None
        self.isListening = True
        self.isShutDown = threading.Event()   # set once the accept loop has fully exited

    def start(self):
        """Bind the UDP socket and run the blocking receive loop.

        Returns only when isListening becomes False or an error occurs;
        always closes the socket and signals isShutDown on the way out.
        """
        try:
            self.context = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.context.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.context.bind((self.address, self.port))
            # self.context.setblocking(False)
            # self.context.settimeout(self.timeout)
            self.log('Servidor iniciado!')
            self.initTimeoutChecker()
            self.isShutDown.clear()
            while self.isListening == True:
                # select() blocks until the socket becomes readable.
                selecteds = select.select([self.context], [], [])
                if self.context in selecteds[0]:
                    self.processRequest()
        except (KeyboardInterrupt, SystemExit):
            pass
        except Exception as error:
            print(error)
        finally:
            self.stop()
            self.isShutDown.set()

    def shutdown(self):
        """Ask the server to stop and wait until the loop has exited."""
        self.stop()
        self.isShutDown.wait()

    def stop(self):
        # Closing the socket also unblocks the pending select() with an error.
        self.isListening = False
        self.context.close()

    def processRequest(self):
        # Handle each readable event on a fresh thread.
        # NOTE(review): several threads may call recvfrom() concurrently on
        # the same socket — ordering of reads is not guaranteed.
        listener = threading.Thread(target=self.receiveData)
        listener.start()

    def receiveData(self):
        """Read one datagram, register its sender and dispatch the payload."""
        data, client = self.context.recvfrom(self.bufferSize)
        # NOTE(review): `data` is bytes, so comparing against the str '' is
        # always True in Python 3; the guard is effectively a no-op.
        if data != '':
            self.registerClient(client)
            self.parseData(data, client)

    def parseData(self, data, client):
        """Answer pings; queue everything else and relay it to the peers."""
        response = data.decode('utf-8')
        if response != '':
            self.log('Endereço:', str(client), 'Data:', response)
            if response == '::ping':
                self.registerPing(client)
            else:
                self.receivedPackets.put((data,client))
                self.sendToAll()

    def sendToAll(self):
        """Relay every queued packet to all clients except its sender."""
        while not self.receivedPackets.empty():
            data, client = self.receivedPackets.get()
            for clientAddr in self.clients:
                if clientAddr != client:
                    self.context.sendto( data, clientAddr)

    def registerClient(self, client):
        """Track a newly seen peer and start its ping bookkeeping."""
        if client not in self.clients:
            self.clients.append(client)
            self.lastPings[str(client)] = time.time()
            self.log('Endereço:', str(client), '- Conectou.')

    def removeClient(self, client):
        """Forget a peer and its ping record."""
        if client in self.clients:
            self.clients.remove(client)
            del self.lastPings[str(client)]
            self.log('Endereço:', str(client), '- Conexão finalizada.')

    def initTimeoutChecker(self):
        # Arm a one-shot timer; checkForTimeouts re-arms it after each pass.
        if self.timeoutTimer == None:
            self.timeoutTimer = threading.Timer(self.timeout, self.checkForTimeouts)
            self.timeoutTimer.start()

    def checkForTimeouts(self):
        """Drop every client whose last ping is older than `timeout` seconds."""
        now = time.time()
        self.timeoutTimer = None
        if now - self.lastTimeoutCheck > self.timeout:
            self.lastTimeoutCheck = time.time()
            # list() snapshot: removeClient mutates lastPings while we iterate.
            for client, pingTime in list(self.lastPings.items()):
                if now - pingTime > self.timeout:
                    self.log('Endereço:', client, '- Timeout')
                    # NOTE(review): eval() rebuilds the (host, port) tuple from
                    # its str() form — works, but eval on stored strings is
                    # fragile; a dict keyed by the tuple itself would avoid it.
                    self.removeClient(eval(client))
            self.initTimeoutChecker()

    def registerPing(self, client):
        """Acknowledge a ping and refresh the client's liveness timestamp."""
        self.context.sendto('::pong'.encode('utf-8'), client)
        self.lastPings[str(client)] = time.time()

    def now(self):
        # Human-readable current time used as the log prefix.
        return time.ctime(time.time())

    def log(self, *message):
        # All parts are expected to be str (callers pre-convert with str()).
        print( self.now(), '-', ' '.join(message))
| [
"marcusagmaia@gmail.com"
] | marcusagmaia@gmail.com |
b3a11d9e0e3f539e355dc7095199b687e163277f | 4a71f8ee21d03c3308245fad1f81525934bd9a31 | /nlu_dir/online/utils/configs.py | f37bcb3ce4a1e823d29eb57bfd0f1bc155f1eb7d | [] | no_license | nwaiting/nlu_context | fe11f13fbf7165cdbc4914a2b92304cbc52eb8e4 | da63efa0523d487a3ec59acc5ebfebfb8c1ea84f | refs/heads/master | 2020-04-08T11:06:54.460129 | 2018-11-28T02:08:11 | 2018-11-28T02:08:11 | 159,293,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py | from online.utils.nameutil import FieldClassifyNames as dcname,ContextNames as cname
from common.pathutil import PathUtil
# Single shared path resolver instance.
_pathutil_class = PathUtil()

# Map: field-classifier name -> context name -> list of vocabulary file paths.
# NOTE(review): assumes PathUtil exposes these *_filepath attributes — the
# "product" entry uses the generic domain/property files while "company"
# additionally includes a "fufilled" (sic) company-domain variant.
domain2entity2paths_set = {dcname.product.value:
                               {cname.domain.value: [_pathutil_class.domain_filepath,
                                                     ],
                                cname.property.value: [_pathutil_class.property_filepath,
                                                       ]
                                },
                           dcname.company.value:
                               {cname.domain.value: [_pathutil_class.company_domain_filepath,
                                                     _pathutil_class.fufilled_company_domain_filepath
                                                     ],
                                cname.property.value: [_pathutil_class.company_property_filepath
                                                       ]
                                }
                           }

# Deployment flag consumed elsewhere; switch to 'online' for production.
_environ = 'local'
# _environ='online'
"zhengyuyu3@jd.com"
] | zhengyuyu3@jd.com |
81e7392eb7df7d48b1169c1670899bdb02ff1c0b | c35a85e98774d10684fd1ef3a27328d0f005b260 | /restapi/tasks.py | 6a6cd1e392142d1b0180592001ed1fb212a05a9d | [] | no_license | unichainplatform/unichain-monitor-web | ba110ec2bf7d71b8df952a1ec866ab4a411b295b | 97a0f01698102f3b88de60e1dcfb5a7c7731e2f3 | refs/heads/master | 2023-08-04T04:01:25.853165 | 2020-03-03T09:58:21 | 2020-03-03T09:58:21 | 240,002,198 | 1 | 0 | null | 2023-07-23T05:26:05 | 2020-02-12T12:00:17 | Python | UTF-8 | Python | false | false | 208 | py | from celery import shared_task
from fabric.api import execute
from restapi.builder import start
from time import sleep
@shared_task
def build():
    """Celery task: run the fabric `start` routine (from restapi.builder)
    on its configured hosts."""
    execute(start)
@shared_task
def sleepp():
    """Celery task that just blocks for 20 seconds — presumably used to
    exercise/verify the worker queue."""
    sleep(20)
"zequnfeng@gmail.com"
] | zequnfeng@gmail.com |
d42a3b73ca399156ab41efeaf04eb9a3c00c8126 | c2dc0e59980bb9141bc2459cc37cfeacc0d9bb75 | /lumicks/pylake/detail/widefield.py | 3d0eacab79adf0090e29ee7c4f35ce26afbbfaf5 | [
"Apache-2.0"
] | permissive | spangeni/pylake | fc5c69548aaa2f037f2f636dc6a5fb2ea0989132 | 7764928020264571aa7a0d8c5c9c0cb5d225377f | refs/heads/master | 2023-06-16T04:57:40.324040 | 2021-07-05T16:51:54 | 2021-07-05T18:48:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,705 | py | import numpy as np
import re
import json
import cv2
import tifffile
import warnings
from copy import copy
class TiffFrame:
    """Thin wrapper around a TIFF frame stack. For camera videos timestamps are stored in the DateTime tag in
    the format start:end.

    Parameters
    ----------
    page : tifffile.tifffile.TiffPage
        Tiff page recorded from a camera in Bluelake.
    align : bool
        If True, color-align the red/blue channels onto the green channel
        when reading RGB data.
    """

    def __init__(self, page, align):
        self._src = page
        self._description = ImageDescription(page)
        self._align = align

    def _align_image(self):
        """Reconstruct the image using alignment matrices from Bluelake;
        return the aligned image as a NumPy array."""
        if not self._description:
            warnings.warn("File does not contain metadata. Only raw data is available")
            return self.raw_data
        try:
            # Only red and blue are warped; green serves as the reference.
            align_mats = [self._description.alignment_matrix(color) for color in ("red", "blue")]
        except KeyError:
            warnings.warn("File does not contain alignment matrices. Only raw data is available")
            return self.raw_data
        img = self.raw_data
        rows, cols, _ = img.shape
        for mat, channel in zip(align_mats, (0, 2)):  # channel index 0 = red, 2 = blue
            # Warp each misaligned channel in place onto the reference frame.
            img[:, :, channel] = cv2.warpAffine(
                img[:, :, channel],
                mat,
                (cols, rows),
                flags=(cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP),
                borderMode=cv2.BORDER_CONSTANT,
                borderValue=0,
            )
        return img

    @property
    def data(self):
        # Aligned RGB data when requested and applicable; raw data otherwise.
        return self._align_image() if (self.is_rgb and self._align) else self._src.asarray()

    @property
    def raw_data(self):
        # Page data exactly as stored in the TIFF, with no color alignment.
        return self._src.asarray()

    @property
    def bit_depth(self):
        bit_depth = self._src.tags["BitsPerSample"].value
        if self.is_rgb:  # (int r, int g, int b)
            return bit_depth[0]
        else:  # int
            return bit_depth

    @property
    def is_rgb(self):
        return self._src.tags["SamplesPerPixel"].value == 3

    def _get_plot_data(self, channel="rgb", vmax=None):
        """Return data as a numpy array, appropriate for use by `imshow`.

        If data is grayscale or channel in ('red', 'green', 'blue')
            return data as is.
        If channel is 'rgb', convert to float in range [0, 1] and correct
        for the optional vmax argument:
            None  : normalize data to max signal of all channels
            float : normalize data to vmax value
        """
        if not self.is_rgb:
            return self.data
        if channel.lower() == "rgb":
            # Scale integer counts into [0, 1] using the sensor bit depth.
            data = (self.data / (2 ** self.bit_depth - 1)).astype(float)
            if vmax is None:
                return data / data.max()
            else:
                return data / vmax
        else:
            try:
                return self.data[:, :, ("red", "green", "blue").index(channel.lower())]
            except ValueError:
                raise ValueError(f"'{channel}' is not a recognized channel")

    @property
    def start(self):
        # DateTime tag holds "start:stop"; extract the first number.
        timestamp_string = re.search(r"^(\d+):\d+$", self._src.tags["DateTime"].value)
        return np.int64(timestamp_string.group(1)) if timestamp_string else None

    @property
    def stop(self):
        # DateTime tag holds "start:stop"; extract the second number.
        timestamp_string = re.search(r"^\d+:(\d+)$", self._src.tags["DateTime"].value)
        return np.int64(timestamp_string.group(1)) if timestamp_string else None
class TiffStack:
    """TIFF images exported from Bluelake

    Parameters
    ----------
    tiff_file : tifffile.TiffFile
        TIFF file recorded from a camera in Bluelake.
    align : bool
        Forwarded to every TiffFrame; controls channel alignment.
    """

    def __init__(self, tiff_file, align):
        self._src = tiff_file
        self._align = align

    def get_frame(self, frame):
        """Wrap page number `frame` in a TiffFrame."""
        page = self._src.pages[frame]
        return TiffFrame(page, align=self._align)

    @staticmethod
    def from_file(image_file, align):
        """Open `image_file` with tifffile and wrap it in a TiffStack."""
        handle = tifffile.TiffFile(image_file)
        return TiffStack(handle, align=align)

    @property
    def num_frames(self):
        """Number of pages (frames) in the underlying TIFF."""
        return len(self._src.pages)
class ImageDescription:
    """JSON metadata parsed from a Bluelake TIFF page description tag."""

    def __init__(self, src):
        try:
            self.json = json.loads(src.description)
        except json.decoder.JSONDecodeError:
            # No parseable metadata; leave both lookup tables empty.
            self.json = {}
            self._cmap = {}
            return
        self._cmap = {"red": 0, "green": 1, "blue": 2}
        # Migrate legacy "Alignment <color> channel" keys to the current
        # "Channel <j> alignment" format, adding a wavelength placeholder.
        if "Alignment red channel" in self.json:
            for color_name, channel_index in self._cmap.items():
                legacy_value = self.json.pop(f"Alignment {color_name} channel")
                self.json[f"Channel {channel_index} alignment"] = legacy_value
                self.json[f"Channel {channel_index} detection wavelength (nm)"] = "N/A"

    def __bool__(self):
        # Truthy only when metadata was successfully parsed.
        return len(self.json) > 0

    @property
    def alignment_roi(self):
        """(x, y, width, height) of the region used for alignment."""
        return np.asarray(self.json["Alignment region of interest (x, y, width, height)"])

    @property
    def roi(self):
        """(x, y, width, height) of the exported region of interest."""
        return np.asarray(self.json["Region of interest (x, y, width, height)"])

    @property
    def offsets(self):
        """(x, y) shift of the alignment ROI relative to the exported ROI."""
        return self.alignment_roi[:2] - self.roi[:2]

    def _raw_alignment_matrix(self, color):
        # The stored 6 coefficients form a 2x3 affine matrix.
        channel_index = self._cmap[color]
        coefficients = self.json[f"Channel {channel_index} alignment"]
        return np.asarray(coefficients).reshape((2, 3))

    @staticmethod
    def _offset_corrected(alignment, x_offset, y_offset):
        """Re-express a 2x3 affine transform in a coordinate system shifted
        by (x_offset, y_offset)."""
        # Translate the origin to where the original transform expects it.
        shift = np.eye(3)
        shift[0, -1] = -x_offset
        shift[1, -1] = -y_offset
        # Apply the original transform, padded from 2x3 to 3x3.
        full = np.vstack((alignment, [0, 0, 1]))
        # Translate back, accounting for the transform's scaling terms.
        unshift = np.eye(3)
        unshift[0, -1] = full[0, 0] * x_offset
        unshift[1, -1] = full[1, 1] * y_offset
        # Compose the three transforms; the constant 3rd row is dropped.
        return (unshift @ (full @ shift))[:2, :]

    def alignment_matrix(self, color):
        """2x3 affine alignment matrix for `color`, corrected for any offset
        between the alignment ROI and the exported ROI."""
        matrix = self._raw_alignment_matrix(color)
        x_offset, y_offset = self.offsets
        if x_offset == 0 and y_offset == 0:
            return matrix
        return self._offset_corrected(matrix, x_offset, y_offset)

    @property
    def for_export(self):
        """Metadata as pretty-printed JSON, with applied alignments renamed."""
        exported = copy(self.json)
        if self:
            for channel_index in range(3):
                applied = exported.pop(f"Channel {channel_index} alignment")
                exported[f"Applied channel {channel_index} alignment"] = applied
        return json.dumps(exported, indent=4)
| [
"61475504+rpauszek@users.noreply.github.com"
] | 61475504+rpauszek@users.noreply.github.com |
0aa81db5fca950f4936b5d5082166f4d30a34ab1 | 81a9529137c7361189965684764470f8f2d92f38 | /yowsup/layers/protocol_chatstate/protocolentities/test_chatstate.py | c7b7c48b966da8e87344b5d15e81017b1b246adb | [
"MIT"
] | permissive | pasinit/yowsup | 670cd34e0d4e189394da97120ad2e76e201715fd | 894007650bf3d75ef7af4a0e57e84dc7cccc4dfe | refs/heads/master | 2021-06-01T17:50:29.866854 | 2014-12-20T11:29:30 | 2014-12-20T11:29:30 | 28,185,000 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | from yowsup.layers.protocol_chatstate.protocolentities.chatstate import ChatstateProtocolEntity
from yowsup.structs import ProtocolTreeNode
from yowsup.structs.protocolentity import ProtocolEntityTest
class ChatstateProtocolEntityTest(ProtocolEntityTest):
    """Fixture for the shared ProtocolEntityTest suite: supplies the entity
    class under test and a sample <chatstate><composing/></chatstate> node."""

    def setUp(self):
        self.ProtocolEntity = ChatstateProtocolEntity
        self.node = ProtocolTreeNode("chatstate")
        self.node.addChild(ProtocolTreeNode('composing'))
| [
"g@almeida.io"
] | g@almeida.io |
95f90ed48904d30f93ba5e9307af06e67327876f | 0d39012599f8bf1a43af0cdc9fc6ec0127f69e89 | /mini_url/urls.py | 98e42ab572d830fdb785098bbd55c71fc9688a12 | [] | no_license | Seifeddine-Selmi/django-tuto | 0a815fef93367247d508361bf7e8047a9eb5a1c2 | 3054cc5e4ebeddedb9e80eb89d0fc36827ba9f2e | refs/heads/master | 2021-01-14T08:35:48.288865 | 2017-02-21T17:35:57 | 2017-02-21T17:35:57 | 81,965,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | #-*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
urlpatterns = [
    # An empty pattern matches the site root.
    #url(r'^$', views.url_list, name='url_list'),
    #url(r'^new_url$', views.new_url, name='url_create'),
    # (?P<code>\w{6}) captures six alphanumeric characters (the short code).
    url(r'^(?P<code>\w{6})/$', views.url_redirection, name='url_redirection'),
    ### Generic CRUD views ###
    url(r'^$', views.ListUrl.as_view(), name='url_list'),
    url(r'^url$', views.URLCreate.as_view(), name='url_create'),
    #url(r'^edit/(?P<pk>\d+)$', views.URLUpdate.as_view(), name='url_update'),
    url(r'^edit/(?P<code>\w{6})$', views.URLUpdate.as_view(), name='url_update'),
    url(r'^delete/(?P<code>\w{6})$', views.URLDelete.as_view(), name='url_delete')
]
"selmi.seifeddine19@gmail.com"
] | selmi.seifeddine19@gmail.com |
e7a491c97fbec62daeb6c02fbf47d0ade5a13322 | 9fb90dc8be1b88b3904d863880b35041724cca9b | /src/contrib/multiply.py | 9dd28fdcdda22d67a8f009e52922d83804726d55 | [
"Apache-2.0"
] | permissive | pombredanne/hub-1 | 3bf6ea03b11b5c61185ba06b5a58eb203a3d95f3 | ba52c85c1cd2974bd4199bf51ce0678c5eff44e1 | refs/heads/master | 2020-12-14T06:17:03.443350 | 2013-04-10T20:40:35 | 2013-04-10T20:40:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | #!/usr/bin/env python
from api import task
@task
def multiply(arg1, arg2):
    """Return the product of the two operands."""
    product = arg1 * arg2
    return product
| [
"kris@automationlogic.com"
] | kris@automationlogic.com |
2965132d2280007b2cb37deb496ec32835affabc | e510582d3c00442f80d2872d369e16032c7b629f | /5-19(Reward).py | 239dc412ef824052d486754549f01e08426d1b78 | [] | no_license | JangJur/Python-Practice | 044885b5c50d4fe9a4a7b2c05326bef2faa4bba2 | 3adda0f4a6bc6f730fbb89baa22ad3d8f706dde3 | refs/heads/master | 2020-04-26T12:38:26.640557 | 2019-03-07T07:11:58 | 2019-03-07T07:11:58 | 173,434,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | p_year = 1988
r_year = 2016
b_money = 50000000
a_money = 1100000000
rate = 0.12

# Compound the deposit once per elapsed year (year-by-year multiplication).
for _ in range(r_year - p_year):
    b_money *= (1 + rate)

# Compare the compounded deposit with the claimed amount and report
# whose statement wins, including the difference.
if b_money > a_money:
    print("%d원 차이로 동일 아저씨의 말씀이 맞습니다." % (b_money - a_money))
elif b_money < a_money:
    print("%d원 차이로 미란 아주머니의 말씀이 맞습니다." % (a_money - b_money))
else:
    print("두분 다 말씀이 맞습니다.")
"wkdwndud753@naver.com"
] | wkdwndud753@naver.com |
58596698f6381143d4fb47cfddea1dde38e4dc79 | d0f28dccc15804d4426bdb79c964fbc69206ec21 | /utils/pandas_test.py | 72e02ae50ce149d0dcce7a1e69b42df9f99ac909 | [] | no_license | INKWWW/python_project | f4170b435a2d08fa931c1ebff4b45bfa172181a6 | 0e4647d09ec4a7a5ea4d14863c2628402b3830d4 | refs/heads/master | 2020-04-02T08:29:54.970819 | 2019-04-13T07:52:54 | 2019-04-13T07:52:54 | 154,247,147 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''pandas practice'''
import pdb
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
# Build a 4x4 integer DataFrame with labeled rows/columns for the exercises.
# s = pd.Series(list('abca'))
# print(s)
# print(pd.get_dummies(s))
# pdb.set_trace()
df = pd.DataFrame(np.arange(16).reshape((4,4)),index=['a','b','c','d'],columns=['one','two','three','four'])
print(df)
print('-----------------')
# print(df.loc[:, 'one':'three'])
### Selecting columns
# print(df.four) # equivalent to: print(df.loc[:,['three', 'four']]) and to: print(df['four'])
# print(df.loc[:,['three', 'four']])
# print(df.loc[:,'four'])
# print(df['four'])
### Selecting a single value
# print(df.loc['a','one'])
### Testing one-hot encoding
# add a new column
df['color'] = ['red', 'green', 'yellow', 'blue']
df['cate'] = ['1', '2', '3' ,'4']
print(df)
# df['color'] = LabelEncoder().fit_transform(df['color'])
# print(df)
# # print(type(df_label))
# df['color'] = OneHotEncoder().fit_transform(df['color'].values.reshape(-1,1)).toarray()
# print(df['color'])
# print(df)
print(df.iloc[:, 2].dtype)
# get_dummies one-hot encodes every object/string column by default.
# df_dummy = pd.get_dummies(df, columns=['color'])
df_dummy = pd.get_dummies(df)
print(df_dummy)
df_dummy['new'] = 1
print(df_dummy)
# print(df_dummy[0:2]) # take the first and second rows
# print(df_dummy[:2]) # take the first and second rows
# print(df_dummy.iloc[:,0:2])
| [
"whmink@foxmail.com"
] | whmink@foxmail.com |
e1c496b08d51cf84a89f1bf972424ec6ed19e4fc | 371ec3b00088f54ef703899048c308ff81e7b78b | /python基础-06/08-数据读取.py | dbcbfe52ce9a33bd2e44bebd13cb963de1b41121 | [] | no_license | zhangninggit/pythonjichu | d1fd428688d05e1f34f565e0bfd530d33f65e7cb | 954c935dfcdc8674d488cfa599934bf2e2be79c2 | refs/heads/master | 2020-04-04T05:19:36.417438 | 2018-11-02T05:16:43 | 2018-11-02T05:16:43 | 155,733,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | f = open("xxx.txt","r")
# Echo the file's contents; guarantee the handle is closed even if the
# read or print raises (the original leaked the handle on error).
try:
    co = f.read()
    print(co)
finally:
    f.close()
| [
"zhangning2017@163.com"
] | zhangning2017@163.com |
a1d4abecf6810db269fd34712f27f857a5b34edc | 7f57aa11f807c1d440b46c94bb4b493a1540ce55 | /SIIC/usuarios/urls.py | 16968d1a51ba2bd1444fb173db896c6bd0f0f306 | [] | no_license | Aletrip-dev/OPE2 | 0a498a266bd78d16a0568686004023d72e57c506 | 9ed21a69c30d6c376a6a0c05ff0586f2a747aacf | refs/heads/master | 2023-04-20T10:44:58.204839 | 2021-05-15T20:44:42 | 2021-05-15T20:44:42 | 343,137,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | from django.urls import path
# User-authentication module
# (import under an alias so it does not clash with this app's own views)
from django.contrib.auth import views as auth_views
from .views import UsuarioCreate, PerfilUpdate, alterar_senha
urlpatterns = [
    # Django's built-in auth views handle login/logout with a custom template;
    # registration, profile update and password change use this app's views.
    path('login/', auth_views.LoginView.as_view(
        template_name='usuarios/login.html'
    ), name='login'),
    path('logout/', auth_views.LogoutView.as_view(), name='logout'),
    path('registrar/', UsuarioCreate.as_view(), name='registrar'),
    path('atualizar-dados/', PerfilUpdate.as_view(), name='atualizar-dados'),
    path('alterar-senha/', alterar_senha, name='alterar-senha'),
]
| [
"aletrip@msn.com"
] | aletrip@msn.com |
b804ac79cb9db91690da307eee3873b42d5a9662 | f4830205e1ee5d2adfa1ed3be62deb89413ca34f | /datastructures/tuples/tuple-sort.py | a499ff8c341a1162819eef519abd7de6e72b8065 | [] | no_license | sarathcakurathi/pyscripts | 362e53a943fa9ff92bac096dea232fc27eed09d1 | 93f8680fdb5f5ad9b688dd58fb5263132df770f7 | refs/heads/master | 2020-07-12T00:25:29.978478 | 2019-08-27T13:12:43 | 2019-08-27T13:12:43 | 204,674,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | #!/bin/python
# Sort list of tuples using second element in tuple
# Order the tuples by their second element and show the result.
input_list = [(5, 6), (4, 2, 3), (1, 4)]
input_list = sorted(input_list, key=lambda item: item[1])
print(input_list)
"sarath.c.akurathi@gmail.com"
] | sarath.c.akurathi@gmail.com |
ec453b7eab4fe6e2560531b8c90b49e805794a0c | f886045c7c8a3d457cf12f6a6df345ccc23ea2e5 | /çalışankayıtokuma.py | 934aeee9fc3b477d08c281d969731c34ba66fa9d | [] | no_license | alpkaantanis/alp | 3041e9917dfe4536ce1f56497c01fcf605944d4b | 137e3501da0a5faaa59d5009ef4aa1c7b152fc01 | refs/heads/master | 2023-04-22T08:27:04.521559 | 2021-05-17T19:17:03 | 2021-05-17T19:17:03 | 368,295,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | def main():
calisan_dosyasi=open('calisanlar.txt','r')
isim=calisan_dosyasi.readline()
while isim !='':
sicil=calisan_dosyasi.readline()
bolum=calisan_dosyasi.readline()
isim=isim.rstrip('\n')
sicil=sicil.rstrip('\n')
bolum=bolum.rstrip('\n')
print('isim:',isim)
print('sicil:',sicil)
print('bolum:',bolum)
print()
isim=calisan_dosyasi.readline()
calisan_dosyasi.close()
# Produce the report when the script is executed.
main()
| [
"alpkaantanis@gmail.com"
] | alpkaantanis@gmail.com |
76795df80dc41e3af67b42a73904e5675010ac9a | ccb8b706ca940c86ce96a212f01797586db90158 | /run | 67464cba3cc4dd0c42fe1537dee80d4a70807432 | [] | no_license | ChrisWaites/theorem-prover | 35e4733309bb2d5fc814a7634a991868712d7940 | ae7f3fd474c81531dd1d77ebbae2c7ff353e029b | refs/heads/master | 2021-09-10T09:09:50.263916 | 2018-03-23T09:35:26 | 2018-03-23T09:35:26 | 50,534,791 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | #!/usr/bin/env python
import argparse
from theoremprover.theorem_search import find_theorem, peanos_axioms
from theoremprover.expression import parse
def main(args):
    """Search for a derivation of args.theorem from args.axioms and print it.

    The axioms and the theorem arrive as strings and are parsed into
    expressions; the resulting path is printed as a chain of expressions
    separated by '->' lines.
    """
    axioms = set(map(parse, args.axioms))
    theorem = parse(args.theorem)
    path = find_theorem(theorem, axioms)
    # Print the first expression, then each subsequent step prefixed by '->'.
    print(path[0])
    for t in path[1:]:
        print("->")
        print(t)
def parse_args(argv=None):
    """Parse command-line options.

    Parameters
    ----------
    argv : list[str] | None
        Argument vector to parse. Defaults to None, which makes argparse
        read sys.argv[1:] — so existing callers are unaffected, while tests
        can pass an explicit list.
    """
    parser = argparse.ArgumentParser(description='Given a set of axioms attempts to prove or disprove a given theorem using propositional logic and number theory.')
    parser.add_argument('-a', '--axioms', type=str, nargs='+', default=set(map(str, peanos_axioms)), help='axioms of formal system [default: peano\'s axioms]')
    parser.add_argument('-t', '--theorem', type=str, default="~(Ea((0)=((a)+(1))))", help='theorem to be proved or disproved [default: ~(Ea((0)=((a)+(1))))]')
    return parser.parse_args(argv)
if __name__ == "__main__":
main(parse_args())
| [
"cwaites3@gatech.edu"
] | cwaites3@gatech.edu | |
71e6161a0479a61818760f02db11ace705c0070d | cdf9cad13361d7cb2306dd5ed1adfad227c5a5aa | /services_infrastructure/data-provider/app/extensions/logger.py | c1bbc11ce335878a3909fcbb1eaf849fdf209be6 | [] | no_license | MDUYN/reference-architecture-consent-management-in-data-spaces | ab32ec810ba429918f9125d66f323dc4ff5bdad0 | 6d8f0fd733c88954f7ce9694c2574882d39eb48c | refs/heads/main | 2023-01-21T12:32:16.519723 | 2020-12-02T16:47:32 | 2020-12-02T16:47:32 | 315,774,537 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | import logging.config
def setup_logging():
    """Configure logging via dictConfig: INFO+ from the 'app' logger and
    WARNING+ from the root logger, both to stdout with one standard format."""
    console_handler = {
        'level': 'INFO',
        'formatter': 'standard',
        'class': 'logging.StreamHandler',
        'stream': 'ext://sys.stdout',  # default would be stderr
    }
    loggers = {
        # Root logger: warnings and worse only.
        '': {
            'handlers': ['console'],
            'level': 'WARNING',
            'propagate': False,
        },
        # Application logger: informational output enabled.
        'app': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': False,
        },
    }
    config = {
        'version': 1,
        'disable_existing_loggers': True,
        'formatters': {
            'standard': {
                'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s',
            },
        },
        'handlers': {'console': console_handler},
        'loggers': loggers,
    }
    logging.config.dictConfig(config)
| [
"marcvanduyn1@gmail.com"
] | marcvanduyn1@gmail.com |
0f14427239d695935ab94dbda667716278b75906 | 4b3e7fba33d7f93b229d676df9e2c52307e884a1 | /velruse/views/facebook.py | 94e927f03518a57de014eca52e63d1dee2722b54 | [
"MIT"
] | permissive | stoiczek/velruse | 5b5e82e098850a0dc99d1d04f201051178f58769 | 79f219cfd75423cf16b2c327abec303d0edea547 | refs/heads/master | 2020-12-25T16:13:48.048229 | 2011-11-09T11:49:12 | 2011-11-09T11:49:12 | 2,372,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,074 | py | """Facebook Authentication Views"""
import uuid
from urlparse import parse_qs
from pyramid.httpexceptions import HTTPFound
from simplejson import loads
import requests
from velruse.exceptions import AuthenticationComplete
from velruse.exceptions import AuthenticationDenied
from velruse.exceptions import CSRFError
from velruse.exceptions import ThirdPartyFailure
from velruse.parsers import extract_fb_data
from velruse.utils import flat_url
def includeme(config):
    """Pyramid inclusion hook: register the Facebook login and callback
    routes and wire them to their view callables."""
    config.add_route("facebook_login", "/facebook/login")
    config.add_route("facebook_process", "/facebook/process")
    config.add_view(facebook_login, route_name="facebook_login")
    config.add_view(facebook_process, route_name="facebook_process")
def facebook_login(request):
    """Initiate a facebook login by redirecting to the OAuth dialog."""
    config = request.registry.settings
    # Requested permission scope: deployment setting wins, else the POSTed
    # 'scope' field (empty by default).
    scope = config.get('velruse.facebook.scope',
                       request.POST.get('scope', ''))
    # A random per-session state token guards the callback against CSRF;
    # facebook_process later checks it matches.
    request.session['state'] = state = uuid.uuid4().hex
    fb_url = flat_url('https://www.facebook.com/dialog/oauth/', scope=scope,
                      client_id=config['velruse.facebook.app_id'],
                      redirect_uri=request.route_url('facebook_process'),
                      state=state)
    return HTTPFound(location=fb_url)
def facebook_process(request):
    """Process the facebook redirect: validate state, exchange the code for
    an access token, fetch the profile, and raise AuthenticationComplete."""
    # The state echoed back by Facebook must match the one stored at login
    # time, otherwise the request did not originate from our redirect.
    if request.GET.get('state') != request.session.get('state'):
        raise CSRFError("CSRF Validation check failed. Request state %s is "
                        "not the same as session state %s" % (
                            request.GET.get('state'), request.session.get('state')
                        ))
    config = request.registry.settings
    code = request.GET.get('code')
    if not code:
        # No code means the user denied access (or Facebook errored out).
        reason = request.GET.get('error_reason', 'No reason provided.')
        raise AuthenticationDenied(reason)

    # Now retrieve the access token with the code
    access_url = flat_url('https://graph.facebook.com/oauth/access_token',
                          client_id=config['velruse.facebook.app_id'],
                          client_secret=config['velruse.facebook.app_secret'],
                          redirect_uri=request.route_url('facebook_process'),
                          code=code)
    r = requests.get(access_url)
    if r.status_code != 200:
        raise ThirdPartyFailure("Status %s: %s" % (r.status_code, r.content))
    # NOTE(review): Python 2 era code — r.content is treated as a str and
    # parsed as a query string per the old Facebook token endpoint format.
    access_token = parse_qs(r.content)['access_token'][0]

    # Retrieve profile data
    graph_url = flat_url('https://graph.facebook.com/me',
                         access_token=access_token)
    r = requests.get(graph_url)
    if r.status_code != 200:
        raise ThirdPartyFailure("Status %s: %s" % (r.status_code, r.content))
    fb_profile = loads(r.content)
    profile = extract_fb_data(fb_profile)

    # Create and raise our AuthenticationComplete exception with the
    # appropriate data to be passed
    complete = AuthenticationComplete()
    complete.profile = profile
    complete.credentials = { 'oauthAccessToken': access_token }
    raise complete
| [
"ben@groovie.org"
] | ben@groovie.org |
ad9a3b50ae05c454484d9697933ee5e00f730b4a | 5dd7c4ec44b76180040badc67849ad44f81690f9 | /unittests/test_stockitem.py | 751eb41a7c209613f1a6e803ac526f15a85a3c77 | [] | no_license | myluco/Phoenix | 68f9abe15a673fe56da6ef4375849ba6a642622d | 2de746beda35b8b5db547658cae1c65cfe164039 | refs/heads/master | 2021-01-18T15:59:05.001240 | 2016-12-04T00:08:36 | 2016-12-04T00:08:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | import unittest
from unittests import wtc
import wx
#---------------------------------------------------------------------------
class stockitem_Tests(wtc.WidgetTestCase):
    """Placeholder test case for wx stock items — currently always fails
    so the missing coverage is visible."""

    # TODO: Remove this test and add real ones.
    def test_stockitem1(self):
        self.fail("Unit tests for stockitem not implemented yet.")
#---------------------------------------------------------------------------
# Allow running this test module on its own.
if __name__ == '__main__':
    unittest.main()
| [
"robin@alldunn.com"
] | robin@alldunn.com |
6c10278bce7d441831f59503418233abcba5dee8 | 17c14b758959cdceec0dce8f783346fdeee8e111 | /chap05_nlp/automl/train.py | bca8b1fd41ce03b243523430bdc8d09621f7daa4 | [] | no_license | yurimkoo/tensormsa_jupyter | b0a340119339936d347d12fbd88fb017599a0029 | 0e75784114ec6dc8ee7eff8094aef9cf37131a5c | refs/heads/master | 2021-07-18T12:22:31.396433 | 2017-10-25T01:42:24 | 2017-10-25T01:42:24 | 109,469,220 | 1 | 0 | null | 2017-11-04T05:20:15 | 2017-11-04T05:20:15 | null | UTF-8 | Python | false | false | 3,650 | py | """
Utility used by the Network class to actually train.
Based on:
https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py
"""
from keras.datasets import mnist, cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping
# Helper: Early stopping.
early_stopper = EarlyStopping(patience=5)
def get_cifar10():
    """Retrieve the CIFAR dataset and process the data.

    Returns a tuple of (nb_classes, batch_size, input_shape,
    x_train, x_test, y_train, y_test) ready for a dense network.
    """
    # Set defaults.
    nb_classes = 10
    batch_size = 64
    input_shape = (3072,)  # 32*32*3 color images flattened to one vector

    # Get the data.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.reshape(50000, 3072)
    x_test = x_test.reshape(10000, 3072)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    # Scale pixel values from [0, 255] into [0, 1].
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
def get_mnist():
    """Retrieve the MNIST dataset and process the data.

    Returns a tuple of (nb_classes, batch_size, input_shape,
    x_train, x_test, y_train, y_test) ready for a dense network.
    """
    # Set defaults.
    nb_classes = 10
    batch_size = 128
    input_shape = (784,)  # 28*28 grayscale images flattened to one vector

    # Get the data.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    # Scale pixel values from [0, 255] into [0, 1].
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
def compile_model(network, nb_classes, input_shape):
    """Compile a sequential model.

    Args:
        network (dict): the parameters of the network
            ('nb_layers', 'nb_neurons', 'activation', 'optimizer')
        nb_classes (int): number of output classes
        input_shape (tuple): shape of one flattened input sample

    Returns:
        a compiled network.
    """
    hidden_layers = network['nb_layers']
    neurons = network['nb_neurons']
    activation = network['activation']
    optimizer = network['optimizer']

    model = Sequential()
    for layer_index in range(hidden_layers):
        dense_kwargs = {'activation': activation}
        # Only the first layer needs the input shape.
        if layer_index == 0:
            dense_kwargs['input_shape'] = input_shape
        model.add(Dense(neurons, **dense_kwargs))
        model.add(Dropout(0.2))  # hard-coded dropout

    # Output layer.
    model.add(Dense(nb_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=optimizer,
                  metrics=['accuracy'])

    return model
def train_and_score(network, dataset):
    """Train the model and return its held-out test accuracy.

    Args:
        network (dict): the parameters of the network
        dataset (str): Dataset to use for training/evaluating
            ('cifar10' or 'mnist'; any other value raises NameError below)

    NOTE(review): despite older docs calling this "test loss", the function
    returns score[1], which is the accuracy metric.
    """
    if dataset == 'cifar10':
        nb_classes, batch_size, input_shape, x_train, \
            x_test, y_train, y_test = get_cifar10()
    elif dataset == 'mnist':
        nb_classes, batch_size, input_shape, x_train, \
            x_test, y_train, y_test = get_mnist()

    model = compile_model(network, nb_classes, input_shape)

    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=10000,  # using early stopping, so no real limit
              verbose=0,
              validation_data=(x_test, y_test),
              callbacks=[early_stopper])

    score = model.evaluate(x_test, y_test, verbose=0)

    return score[1]  # 1 is accuracy. 0 is loss.
"tmddno1@naver.com"
] | tmddno1@naver.com |
fbf27bd442478db50ae76d53ff9eeda876dab945 | 35150c23e611588b506851d7bb33049682e667f2 | /hotelapp/dao.py | 0fce97739b963a380a00e047225a25e2bcc68375 | [] | no_license | hienhuynhxuan/HotelManager_KO | 4fd9dda2dc21a6d7254884b215af02c12af3d2c2 | f0f362be1ae91462f987a86f5dff4c39f401dd80 | refs/heads/main | 2023-01-22T04:47:07.132558 | 2020-11-19T02:48:01 | 2020-11-19T02:48:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | from sqlalchemy import extract
from hotelapp.models import *
def read_room_info(name=None, kind_of_room_id=None, status=None, amount=None):
    """Return the rooms matching the given filters, combined with AND.

    Any filter left as None is skipped.  Always returns a list — the
    original mixed lazy ``filter`` objects (single-use iterators) with
    lists depending on which filters were active, which made the result
    unreliable to iterate more than once.

    ``amount`` is accepted for interface compatibility but is currently
    unused.
    """
    rooms = Room.query.all()
    if name:
        rooms = [room for room in rooms if room.name == name]
    if kind_of_room_id:
        # Matched by *name* against the stringified id, mirroring the
        # original behaviour — confirm this mapping is intended upstream.
        kind = str(kind_of_room_id)
        rooms = [room for room in rooms if room.KindOfRoom.name == kind]
    if status:
        rooms = [room for room in rooms if room.status.value == status]
    return rooms
| [
"hien.hx133@gmail.com"
] | hien.hx133@gmail.com |
81e3da142f2a706ac4fbe041c6cc6057c44a4027 | 9be1ab6f7cc9e1e8474b7c76ef89284b54782c46 | /chapter17_errors/8_custom_exception.py | 8441acb47c175019e2ace90a11dcdee1fd945443 | [] | no_license | Nateque123/python_tutorials | 8d9842d46570e6cecd7aa5419b9f77bc4468d391 | 83743acf4862155c5837c154d0422f74d0629043 | refs/heads/master | 2022-11-20T11:39:02.565456 | 2020-07-24T11:08:34 | 2020-07-24T11:08:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | # how to create custom exception
# A custom exception is created by subclassing an existing exception type.
# NOTE(review): the name is misspelled ("TooShort"); renaming would break
# existing except clauses, so it is documented rather than changed.
class NameToShortError(ValueError):
    pass
def check(name):
    """Print a greeting for `name`; raise NameToShortError when the name
    has fewer than 8 characters."""
    if len(name) >= 8:
        print(f'Hello {name}')
    else:
        raise NameToShortError('you enter short name...')
# Prompt for a name and run the validation.  Note: check() returns None,
# so this also prints "None" after the greeting.
name1 = input('Enter your name: ')
print(check(name1))
"noreply@github.com"
] | Nateque123.noreply@github.com |
95e2a602cdea202da5cba6e81d040adac387cb68 | ea3048858939a8162f82a1d0b0ec43171530ea8d | /apps/search/models.py | 62ec89db3a73a35a853d885c234a3453ffbb6a68 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | kknet/NewsBlur | d229b12c39f7ca3eab1e28922171f87ea37b8df1 | fa78b434f980d2814dd05fedb70d9e87259ee998 | refs/heads/master | 2021-01-17T22:36:29.651729 | 2016-09-20T20:05:25 | 2016-09-20T20:05:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,270 | py | import re
import time
import datetime
import pymongo
import pyes
import redis
import celery
import mongoengine as mongo
from django.conf import settings
from django.contrib.auth.models import User
from apps.search.tasks import IndexSubscriptionsForSearch
from apps.search.tasks import IndexSubscriptionsChunkForSearch
from apps.search.tasks import IndexFeedsForSearch
from utils import log as logging
from utils.feed_functions import chunks
class MUserSearch(mongo.Document):
    '''Search index state of a user's subscriptions.

    Tracks whether (and when) a user's subscribed feeds have been pushed into
    the Elasticsearch story index, so searches only run against indexed data.
    '''
    user_id = mongo.IntField(unique=True)
    last_search_date = mongo.DateTimeField()
    subscriptions_indexed = mongo.BooleanField()
    subscriptions_indexing = mongo.BooleanField()
    meta = {
        'collection': 'user_search',
        'indexes': ['user_id'],
        'index_drop_dups': True,
        'allow_inheritance': False,
    }
    @classmethod
    def get_user(cls, user_id, create=True):
        # Fetch this user's search state, reading from the Mongo primary to
        # avoid stale replica data; optionally create the record on first use.
        try:
            user_search = cls.objects.read_preference(pymongo.ReadPreference.PRIMARY)\
                                     .get(user_id=user_id)
        except cls.DoesNotExist:
            if create:
                user_search = cls.objects.create(user_id=user_id)
            else:
                user_search = None
        return user_search
    def touch_search_date(self):
        # Record that the user just searched; kick off a first-time bulk index
        # of their subscriptions if one has never run and none is in flight.
        if not self.subscriptions_indexed and not self.subscriptions_indexing:
            self.schedule_index_subscriptions_for_search()
            self.subscriptions_indexing = True
        self.last_search_date = datetime.datetime.now()
        self.save()
    def schedule_index_subscriptions_for_search(self):
        # Queue the bulk index as a celery task on the dedicated tasker queue.
        IndexSubscriptionsForSearch.apply_async(kwargs=dict(user_id=self.user_id),
                                                queue='search_indexer_tasker')
    # Should be run as a background task
    def index_subscriptions_for_search(self):
        # Bulk-index every feed the user subscribes to, fanning the work out
        # in chunks of 6 feeds per celery task, and publish progress over the
        # user's redis pubsub channel for the UI.
        from apps.rss_feeds.models import Feed
        from apps.reader.models import UserSubscription
        SearchStory.create_elasticsearch_mapping()
        start = time.time()
        user = User.objects.get(pk=self.user_id)
        r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
        r.publish(user.username, 'search_index_complete:start')
        subscriptions = UserSubscription.objects.filter(user=user).only('feed')
        total = subscriptions.count()
        feed_ids = []
        for sub in subscriptions:
            try:
                feed_ids.append(sub.feed.pk)
            except Feed.DoesNotExist:
                # Subscription points at a deleted feed; skip it.
                continue
        feed_id_chunks = [c for c in chunks(feed_ids, 6)]
        logging.user(user, "~FCIndexing ~SB%s feeds~SN in %s chunks..." %
                     (total, len(feed_id_chunks)))
        tasks = [IndexSubscriptionsChunkForSearch().s(feed_ids=feed_id_chunk,
                                                      user_id=self.user_id
                                                      ).set(queue='search_indexer')
                 for feed_id_chunk in feed_id_chunks]
        group = celery.group(*tasks)
        res = group.apply_async(queue='search_indexer')
        # Block until every chunk task has finished.
        res.join_native()
        duration = time.time() - start
        logging.user(user, "~FCIndexed ~SB%s feeds~SN in ~FM~SB%s~FC~SN sec." %
                     (total, round(duration, 2)))
        r.publish(user.username, 'search_index_complete:done')
        self.subscriptions_indexed = True
        self.subscriptions_indexing = False
        self.save()
    def index_subscriptions_chunk_for_search(self, feed_ids):
        # Worker half of the bulk index: index one chunk of feeds, then
        # publish which feed ids completed.
        from apps.rss_feeds.models import Feed
        r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
        user = User.objects.get(pk=self.user_id)
        logging.user(user, "~FCIndexing %s feeds..." % len(feed_ids))
        for feed_id in feed_ids:
            feed = Feed.get_by_id(feed_id)
            if not feed: continue
            feed.index_stories_for_search()
        r.publish(user.username, 'search_index_complete:feeds:%s' %
                  ','.join([str(f) for f in feed_ids]))
    @classmethod
    def schedule_index_feeds_for_search(cls, feed_ids, user_id):
        # Incremental indexing: only queue work for users whose full index
        # already exists and is not currently being rebuilt.
        user_search = cls.get_user(user_id, create=False)
        if (not user_search or
            not user_search.subscriptions_indexed or
            user_search.subscriptions_indexing):
            # User hasn't searched before.
            return
        if not isinstance(feed_ids, list):
            feed_ids = [feed_ids]
        IndexFeedsForSearch.apply_async(kwargs=dict(feed_ids=feed_ids, user_id=user_id),
                                        queue='search_indexer')
    @classmethod
    def index_feeds_for_search(cls, feed_ids, user_id):
        # Synchronously index the given feeds (celery task body for
        # schedule_index_feeds_for_search above).
        from apps.rss_feeds.models import Feed
        user = User.objects.get(pk=user_id)
        logging.user(user, "~SB~FCIndexing %s~FC by request..." % feed_ids)
        for feed_id in feed_ids:
            feed = Feed.get_by_id(feed_id)
            if not feed: continue
            feed.index_stories_for_search()
    @classmethod
    def remove_all(cls, drop_index=False):
        # You only need to drop the index if there is data you want to clear.
        # A new search server won't need this, as there isn't anything to drop.
        if drop_index:
            logging.info(" ---> ~FRRemoving stories search index...")
            SearchStory.drop()
        user_searches = cls.objects.all()
        logging.info(" ---> ~SN~FRRemoving ~SB%s~SN user searches..." % user_searches.count())
        for user_search in user_searches:
            try:
                user_search.remove()
            except Exception, e:
                # Best-effort cleanup: log and keep going (Python 2 syntax).
                print " ****> Error on search removal: %s" % e
    def remove(self):
        # Un-mark every subscribed feed as search-indexed, then delete this
        # per-user search state document.
        from apps.rss_feeds.models import Feed
        from apps.reader.models import UserSubscription
        user = User.objects.get(pk=self.user_id)
        subscriptions = UserSubscription.objects.filter(user=self.user_id)
        total = subscriptions.count()
        removed = 0
        for sub in subscriptions:
            try:
                feed = sub.feed
            except Feed.DoesNotExist:
                continue
            if not feed.search_indexed:
                continue
            feed.search_indexed = False
            feed.save()
            removed += 1
        logging.user(user, "~FCRemoved ~SB%s/%s feed's search indexes~SN for ~SB~FB%s~FC~SN." %
                     (removed, total, user.username))
        self.delete()
class SearchStory:
    """Elasticsearch gateway for the per-story search index.

    All state lives in Elasticsearch; this class wraps pyes calls using the
    "stories-index" / "stories-type" naming convention.
    """
    ES = pyes.ES(settings.ELASTICSEARCH_STORY_HOSTS)
    name = "stories"
    @classmethod
    def index_name(cls):
        """Return the Elasticsearch index name for stories."""
        return "%s-index" % cls.name
    @classmethod
    def type_name(cls):
        """Return the Elasticsearch document type name for stories."""
        return "%s-type" % cls.name
    @classmethod
    def create_elasticsearch_mapping(cls, delete=False):
        """Create the stories index (optionally recreating it) and its field mapping."""
        if delete:
            cls.ES.indices.delete_index_if_exists("%s-index" % cls.name)
        cls.ES.indices.create_index_if_missing("%s-index" % cls.name)
        # Relevance boosts: title counts 3x and tags 2x relative to
        # content/author; _source is disabled since documents are never
        # read back, only matched.
        mapping = {
            'title': {
                'boost': 3.0,
                'index': 'analyzed',
                'store': 'no',
                'type': 'string',
                'analyzer': 'standard',
            },
            'content': {
                'boost': 1.0,
                'index': 'analyzed',
                'store': 'no',
                'type': 'string',
                'analyzer': 'simple',
            },
            'tags': {
                'boost': 2.0,
                'index': 'analyzed',
                'store': 'no',
                'type': 'string',
                'analyzer': 'standard',
            },
            'author': {
                'boost': 1.0,
                'index': 'analyzed',
                'store': 'no',
                'type': 'string',
                'analyzer': 'simple',
            },
            'feed_id': {
                'store': 'no',
                'type': 'integer'
            },
            'date': {
                'store': 'no',
                'type': 'date',
            }
        }
        cls.ES.indices.put_mapping("%s-type" % cls.name, {
            'properties': mapping,
            '_source': {'enabled': False},
        }, ["%s-index" % cls.name])
    @classmethod
    def index(cls, story_hash, story_title, story_content, story_tags, story_author, story_feed_id,
              story_date):
        """Insert or update one story document, keyed by story_hash."""
        doc = {
            "content" : story_content,
            "title" : story_title,
            "tags" : ', '.join(story_tags),
            "author" : story_author,
            "feed_id" : story_feed_id,
            "date" : story_date,
        }
        try:
            cls.ES.index(doc, "%s-index" % cls.name, "%s-type" % cls.name, story_hash)
        except pyes.exceptions.NoServerAvailable:
            # Indexing is best-effort; searches just won't see this story.
            logging.debug(" ***> ~FRNo search server available.")
    @classmethod
    def remove(cls, story_hash):
        """Delete one story document by hash; best-effort if the server is down."""
        try:
            cls.ES.delete("%s-index" % cls.name, "%s-type" % cls.name, story_hash)
        except pyes.exceptions.NoServerAvailable:
            logging.debug(" ***> ~FRNo search server available.")
    @classmethod
    def drop(cls):
        """Drop the entire stories index."""
        cls.ES.indices.delete_index_if_exists("%s-index" % cls.name)
    @classmethod
    def query(cls, feed_ids, query, order, offset, limit):
        """Search stories across feed_ids; return a list of matching story ids.

        order is "newest" for date-descending, anything else for ascending;
        offset/limit page through the results.
        """
        cls.create_elasticsearch_mapping()
        cls.ES.indices.refresh()
        query = re.sub(r'([^\s\w_\-])+', ' ', query) # Strip non-alphanumeric
        sort = "date:desc" if order == "newest" else "date:asc"
        string_q = pyes.query.QueryStringQuery(query, default_operator="AND")
        # Cap the feed filter at 1000 ids to keep the terms query bounded.
        feed_q = pyes.query.TermsQuery('feed_id', feed_ids[:1000])
        q = pyes.query.BoolQuery(must=[string_q, feed_q])
        try:
            results = cls.ES.search(q, indices=cls.index_name(), doc_types=[cls.type_name()],
                                    partial_fields={}, sort=sort, start=offset, size=limit)
        except pyes.exceptions.NoServerAvailable:
            logging.debug(" ***> ~FRNo search server available.")
            return []
        logging.info(" ---> ~FG~SNSearch ~FCstories~FG for: ~SB%s~SN (across %s feed%s)" %
                     (query, len(feed_ids), 's' if len(feed_ids) != 1 else ''))
        try:
            result_ids = [r.get_id() for r in results]
        # BUG FIX: was `except pyes.InvalidQuery(), e:` which used the
        # Python-2-only comma form AND caught an *instance* rather than the
        # exception class, so the handler could never match. `as` works on
        # both Python 2.6+ and Python 3.
        except pyes.InvalidQuery as e:
            logging.info(" ---> ~FRInvalid search query \"%s\": %s" % (query, e))
            return []
        return result_ids
class SearchFeed:
    """Elasticsearch gateway for feed autocomplete/search.

    Uses a lazily-created pyes client and an edge-n-gram analyzer so that
    prefixes of a feed's title, address, or link match as the user types.
    """
    _es_client = None            # lazily-initialized shared pyes client
    name = "feeds"
    @classmethod
    def ES(cls):
        """Return the shared pyes client, creating it (and the index) on first use."""
        if cls._es_client is None:
            cls._es_client = pyes.ES(settings.ELASTICSEARCH_FEED_HOSTS)
            if not cls._es_client.indices.exists_index(cls.index_name()):
                cls.create_elasticsearch_mapping()
        return cls._es_client
    @classmethod
    def index_name(cls):
        """Return the Elasticsearch index name for feeds."""
        return "%s-index" % cls.name
    @classmethod
    def type_name(cls):
        """Return the Elasticsearch document type name for feeds."""
        return "%s-type" % cls.name
    @classmethod
    def create_elasticsearch_mapping(cls, delete=False):
        """Create the feeds index (optionally recreating it) with an edge-n-gram analyzer."""
        if delete:
            cls.ES().indices.delete_index_if_exists(cls.index_name())
        # 1-15 character edge n-grams give prefix matching for autocomplete.
        # Renamed from `settings` to avoid shadowing django.conf.settings.
        index_settings = {
            "index" : {
                "analysis": {
                    "analyzer": {
                        "edgengram_analyzer": {
                            "filter": ["edgengram"],
                            "tokenizer": "lowercase",
                            "type": "custom"
                        },
                    },
                    "filter": {
                        "edgengram": {
                            "max_gram": "15",
                            "min_gram": "1",
                            "type": "edgeNGram"
                        },
                    }
                }
            }
        }
        cls.ES().indices.create_index_if_missing(cls.index_name(), index_settings)
        mapping = {
            "address": {
                "analyzer": "edgengram_analyzer",
                "store": False,
                "term_vector": "with_positions_offsets",
                "type": "string"
            },
            "feed_id": {
                "store": True,
                "type": "string"
            },
            "num_subscribers": {
                "index": "analyzed",
                "store": True,
                "type": "long"
            },
            "title": {
                "analyzer": "edgengram_analyzer",
                "store": False,
                "term_vector": "with_positions_offsets",
                "type": "string"
            },
            "link": {
                "analyzer": "edgengram_analyzer",
                "store": False,
                "term_vector": "with_positions_offsets",
                "type": "string"
            }
        }
        cls.ES().indices.put_mapping(cls.type_name(), {
            'properties': mapping,
        }, [cls.index_name()])
        cls.ES().indices.flush()
    @classmethod
    def index(cls, feed_id, title, address, link, num_subscribers):
        """Insert or update one feed document, keyed by feed_id."""
        doc = {
            "feed_id" : feed_id,
            "title" : title,
            "address" : address,
            "link" : link,
            "num_subscribers" : num_subscribers,
        }
        try:
            cls.ES().index(doc, cls.index_name(), cls.type_name(), feed_id)
        except pyes.exceptions.NoServerAvailable:
            logging.debug(" ***> ~FRNo search server available.")
    @classmethod
    def query(cls, text, max_subscribers=5):
        """Search feeds matching *text*; return up to max_subscribers results
        ordered by subscriber count (descending)."""
        try:
            cls.ES().default_indices = cls.index_name()
            cls.ES().indices.refresh()
        except pyes.exceptions.NoServerAvailable:
            logging.debug(" ***> ~FRNo search server available.")
            return []
        if settings.DEBUG:
            max_subscribers = 1
        logging.info("~FGSearch ~FCfeeds~FG: ~SB%s" % text)
        # Any one of address/link/title may match ("should" clauses).
        q = pyes.query.BoolQuery()
        q.add_should(pyes.query.MatchQuery('address', text, analyzer="simple", cutoff_frequency=0.0005, minimum_should_match="75%"))
        q.add_should(pyes.query.MatchQuery('link', text, analyzer="simple", cutoff_frequency=0.0005, minimum_should_match="75%"))
        q.add_should(pyes.query.MatchQuery('title', text, analyzer="simple", cutoff_frequency=0.0005, minimum_should_match="75%"))
        q = pyes.Search(q, min_score=1)
        results = cls.ES().search(query=q, size=max_subscribers, doc_types=[cls.type_name()], sort="num_subscribers:desc")
        return results
    @classmethod
    def export_csv(cls):
        """Dump popular feeds (>= 20 subscribers) to feeds.csv."""
        # BUG FIX: Feed was referenced without being imported anywhere in this
        # module (NameError at runtime). Import locally, matching the pattern
        # used by the other methods in this file.
        from apps.rss_feeds.models import Feed
        import djqscsv
        qs = Feed.objects.filter(num_subscribers__gte=20).values('id', 'feed_title', 'feed_address', 'feed_link', 'num_subscribers')
        csv = djqscsv.render_to_csv_response(qs).content
        # Context manager guarantees the file is closed even on error.
        with open('feeds.csv', 'w+') as f:
            f.write(csv)
| [
"samuel@ofbrooklyn.com"
] | samuel@ofbrooklyn.com |
00c9949db590246f66d2bb3310ffbfe39a1fee79 | 9b24eb3a15e9acd4aaf7af00d88488f5a056438f | /backend/home/api/v1/viewsets.py | c7c28c17f806e899fca335a7c524c6cb75b776a2 | [] | no_license | crowdbotics-apps/dashboard-app-18025 | b8fb28008d42371c7d74102b78ae380725b3221a | 202f33b00e14f65adfc9dbf84f748ad5cc051652 | refs/heads/master | 2022-11-15T12:16:12.733390 | 2020-06-15T17:24:52 | 2020-06-15T17:24:52 | 271,619,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,485 | py | from rest_framework import viewsets
from rest_framework import authentication
from .serializers import (
AddressSerializer,
CustomTextSerializer,
HomePageSerializer,
XYSerializer,
)
from rest_framework.authentication import SessionAuthentication, TokenAuthentication
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.permissions import IsAdminUser
from rest_framework.viewsets import ModelViewSet, ViewSet
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from home.api.v1.serializers import (
SignupSerializer,
CustomTextSerializer,
HomePageSerializer,
UserSerializer,
)
from home.models import Address, CustomText, HomePage, XY
class SignupViewSet(ModelViewSet):
    """Registration endpoint: accepts POSTed signup data only (no reads/updates)."""
    serializer_class = SignupSerializer
    http_method_names = ["post"]
class LoginViewSet(ViewSet):
    """Based on rest_framework.authtoken.views.ObtainAuthToken"""
    serializer_class = AuthTokenSerializer
    def create(self, request):
        # Validate the posted credentials, then hand back (or mint) the
        # user's auth token alongside the serialized user record.
        auth_serializer = self.serializer_class(
            data=request.data, context={"request": request}
        )
        auth_serializer.is_valid(raise_exception=True)
        authenticated_user = auth_serializer.validated_data["user"]
        token, _created = Token.objects.get_or_create(user=authenticated_user)
        serialized_user = UserSerializer(authenticated_user)
        return Response({"token": token.key, "user": serialized_user.data})
class CustomTextViewSet(ModelViewSet):
    """Admin-only read/update access to CustomText records (no create/delete)."""
    serializer_class = CustomTextSerializer
    queryset = CustomText.objects.all()
    authentication_classes = (SessionAuthentication, TokenAuthentication)
    permission_classes = [IsAdminUser]
    http_method_names = ["get", "put", "patch"]
class HomePageViewSet(ModelViewSet):
    """Admin-only read/update access to HomePage records (no create/delete)."""
    serializer_class = HomePageSerializer
    queryset = HomePage.objects.all()
    authentication_classes = (SessionAuthentication, TokenAuthentication)
    permission_classes = [IsAdminUser]
    http_method_names = ["get", "put", "patch"]
class XYViewSet(viewsets.ModelViewSet):
    """Full CRUD API for XY records; requires session or token authentication."""
    serializer_class = XYSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = XY.objects.all()
class AddressViewSet(viewsets.ModelViewSet):
    """Full CRUD API for Address records; requires session or token authentication."""
    serializer_class = AddressSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Address.objects.all()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
a07369e5917c7c67c2f793f80ed4d1023bafb477 | 0b12a3aab3b06a9ff007eaf6daaca0a696be84c1 | /phase-0/classify.py | cdecf02a2e4252e324b6225b05f950a25d64ba8c | [] | no_license | fariszahrah/crypto-twitter | 1cfdaa7db8f14d9a5280d68280690c7aa70162c3 | 2eef22a5e1d71d89d0f1e5f7d344cbb7c929b91d | refs/heads/master | 2021-01-08T23:45:00.693390 | 2020-02-21T16:27:36 | 2020-02-21T16:27:36 | 242,178,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,389 | py | from sklearn.ensemble import RandomForestClassifier
import sklearn as sk
import re
import nltk
from sklearn.feature_extraction.text import *
import pandas as pd
import numpy as np
import pickle
from collections import Counter
'''
the train dataset was created from the following lines of code:
****
Note please dont run this or it will
override the tweets i manually evaluated
****
sample = tweets.sample(frac=1/20,random_state=3)
sample.to_excel('train_tweets.xlsx')
in excel I added a column for target. and that is waht I use as the training dataset below
'''
def download_data():
    """Load the hand-labelled training tweets, their labels, and the unlabelled test tweets.

    Returns a (train, target, test) tuple: the labelled DataFrame, its
    'Target' column of manual classifications, and the test DataFrame.
    """
    labelled = pd.read_excel('./train_tweets.xlsx')
    manual_labels = labelled['Target']
    unlabelled = pd.read_pickle('./main_user_tweets.pkl')
    return labelled, manual_labels, unlabelled
def predict(train, target, test):
    """Train a TF-IDF + random-forest classifier on *train*/*target* and label *test*.

    Both inputs must have a 'text' column. Returns an array of predicted
    labels, one per row of *test*, in order.
    """
    train_texts = train['text'].tolist()
    test_texts = test['text'].tolist()
    # Fit the vectorizer over the combined corpus so train and test share
    # one vocabulary / feature space.
    all_texts = train_texts + test_texts
    # Split point: previously hard-coded as 415, which silently broke for
    # any other training-set size.
    n_train = len(train_texts)
    tfidf_vectorizer = TfidfVectorizer(min_df=2)
    X_tfidf = tfidf_vectorizer.fit_transform(all_texts)
    # Materialize the dense matrix once (it was built twice before).
    features = pd.DataFrame(X_tfidf.todense())
    train_df = features.iloc[:n_train]
    test_df = features.iloc[n_train:]
    RF = RandomForestClassifier(n_estimators=100, max_depth=40, random_state=0).fit(train_df, target)
    return RF.predict(test_df)
def print_pred(predictions):
    """Print a per-class tally of predicted labels (2=non-subject, 1=tech, 0=trading)."""
    tally = Counter(predictions)
    class_lines = (
        ('Number of non-subject tweets: {0}', 2),
        ('Number of Technology focussed tweets: {0}', 1),
        ('Number of Trading focussed tweets: {0}', 0),
    )
    for template, label in class_lines:
        print(template.format(tally[label]))
def print_examples(predictions,test): # this is also just for printing... fluff
    # Print one sample tweet per predicted class (0=trading, 1=technology,
    # 2=non-subject), skipping the first 23 predictions.
    n=False      # already printed a non-subject example
    tech=False   # already printed a technology example
    trade=False  # already printed a trading example
    for i,v in enumerate(list(predictions)):
        # NOTE(review): `i > 22` skips the first 23 predictions — the intent
        # of this threshold isn't visible here; confirm before changing.
        if i > 22:
            # NOTE(review): predictions[i] corresponds to test row i, yet the
            # tweet printed is test.iloc[415+i] — offset by the training-set
            # size used in predict(); this looks like an indexing bug and can
            # run past the end of `test`. Verify against the data sizes.
            if predictions[i] == 0 and trade==False:
                print('\nTweet classified as a trading tweet:\n', test.iloc[415+i]['text'])
                trade = True
            elif predictions[i] == 1 and tech==False:
                print('\nTweet classified as a technology tweet:\n', test.iloc[415+i]['text'])
                tech = True
            elif predictions[i] == 2 and n==False:
                print('\nTweet classified as a Non subject tweet:\n', test.iloc[415+i]['text'])
                n = True
n = True
def main():
    """Run the end-to-end pipeline: load data, classify the test tweets, report results."""
    labelled, labels, unlabelled = download_data()
    guesses = predict(labelled, labels, unlabelled)
    print_pred(guesses)
    print_examples(guesses, unlabelled)
# Run the pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"fariszahrah@Fariss-MBP.attlocal.net"
] | fariszahrah@Fariss-MBP.attlocal.net |
005b11fedd1241560633f3f19ce4ab82b6cf9068 | 43dabf77afd5c44d55b465c1b88bf9a5e7c4c9be | /resize.py | 306400848b45f96d2ec9be96bbc1dbae1a9871f7 | [] | no_license | geegatomar/OpenCV-Computer-Vision-Adrian-Rosebrock | cc81a990a481b5e4347dd97369b38479b46e55bc | daa579309010e6e7fefb004b878ffb26374401d0 | refs/heads/master | 2022-11-18T13:07:08.040483 | 2020-07-20T01:55:39 | 2020-07-20T01:55:39 | 280,987,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | import cv2
import argparse
import numpy as np
# Parse command-line arguments: the image path (required) and the desired
# output width in pixels (defaults to 100).
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path of image")
ap.add_argument("-w", "--width", default=100, help="Width of resized img")
args = vars(ap.parse_args())

# Load the image; OpenCV shape is (height, width, channels).
image = cv2.imread(args["image"])
width = int(args["width"])

# Scale factor mapping the original width onto the requested width; applying
# the same factor to the height preserves the aspect ratio.
ratio = width / image.shape[1]
# BUG FIX: cv2.resize takes dsize as (width, height). The original passed
# (scaled_height, width), distorting the output image.
dim = (width, int(ratio * image.shape[0]))
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
cv2.imshow("Resized img", resized)
cv2.waitKey(0)
| [
"geegatomar@gmail.com"
] | geegatomar@gmail.com |
612f3220df184b463f51fd5d95a6580cca79748d | 4ad3624c676defcf75a3235cc014534806dde50b | /baranovperictyrant.py | f60ee7cfb02dc67a198b3934d2a99733a1b8f959 | [] | no_license | gbaranov99/BaranovPericCS407Program1 | 5ca2b0eeb3b290d0142eb8e4b9a4da0acf7a4bf1 | ab29da5887f5f1ff1fda4b2e3dd4bdb3209e7b88 | refs/heads/master | 2023-02-27T20:47:03.196996 | 2021-02-13T01:07:38 | 2021-02-13T01:07:38 | 338,470,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,414 | py | #!/usr/bin/python
# This is a dummy peer that just illustrates the available information your peers
# have available. The setup script will copy it to create the versions you edit
import random
import logging
from messages import Upload, Request
from util import even_split
from peer import Peer
class BaranovPericTyrant(Peer):
    """Starter BitTorrent-style peer: requests random needed pieces and
    unchokes a single random requester each round. (The tyrant strategy
    itself is not implemented yet.)"""
    def post_init(self):
        print("post_init(): %s here!" % self.id)
        ##################################################################################
        # Declare any variables here that you want to be able to access in future rounds #
        ##################################################################################
        # This commented-out code is an example of a Python dictionary,
        # which is a convenient way to store a value indexed by a particular "key"
        #self.dummy_state = dict()
        #self.dummy_state["cake"] = "lie"
    def requests(self, peers, history):
        """
        peers: available info about the peers (who has what pieces)
        history: what's happened so far as far as this peer can see

        returns: a list of Request() objects

        This will be called after update_pieces() with the most recent state.
        """
        # Calculate the pieces you still need
        needed = lambda i: self.pieces[i] < self.conf.blocks_per_piece
        needed_pieces = list(filter(needed, list(range(len(self.pieces)))))
        np_set = set(needed_pieces)  # sets support fast intersection ops.
        logging.debug("%s here: still need pieces %s" % (
            self.id, needed_pieces))
        #This code shows you what you have access to in peers and history
        #You won't need it in your final solution, but may want to uncomment it
        #and see what it does to help you get started
        """
        logging.debug("%s still here. Here are some peers:" % self.id)
        for p in peers:
            logging.debug("id: %s, available pieces: %s" % (p.id, p.available_pieces))
        logging.debug("And look, I have my entire history available too:")
        logging.debug("look at the AgentHistory class in history.py for details")
        logging.debug(str(history))
        """
        requests = []   # We'll put all the things we want here
        # Symmetry breaking is good...
        random.shuffle(needed_pieces)
        # count frequencies of all pieces that the other peers have
        # this will be useful for implementing rarest first
        ###########################################################
        # you'll need to write the code to compute these yourself #
        ###########################################################
        frequencies = {}
        # Python syntax to perform a sort using a user defined sort key
        # This exact sort is probably not a useful sort, but other sorts might be useful
        # peers.sort(key=lambda p: p.id)
        # request all available pieces from all peers!
        # (up to self.max_requests from each)
        #############################################################################
        # This code asks for pieces at random, you need to adapt it to rarest first #
        #############################################################################
        for peer in peers:
            av_set = set(peer.available_pieces)
            isect = av_set.intersection(np_set)
            n = min(self.max_requests, len(isect))
            # More symmetry breaking -- ask for random pieces.
            # You could try fancier piece-requesting strategies
            # to avoid getting the same thing from multiple peers at a time.
            # BUG FIX: random.sample() no longer accepts a set (deprecated in
            # Python 3.9, removed in 3.11) -- materialize it as a list first.
            for piece_id in random.sample(list(isect), int(n)):
                # aha! The peer has this piece! Request it.
                # which part of the piece do we need next?
                # (must get the next-needed blocks in order)
                #
                # If you loop over the piece_ids you want to request above
                # you don't need to change the rest of this code
                start_block = self.pieces[piece_id]
                r = Request(self.id, peer.id, piece_id, start_block)
                requests.append(r)
        return requests
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """
        ##############################################################################
        # The code and suggestions here will get you started for the standard client #
        # You'll need to change things for the other clients                         #
        ##############################################################################
        round = history.current_round()
        logging.debug("%s again. It's round %d." % (
            self.id, round))
        # One could look at other stuff in the history too here.
        # For example, history.downloads[round-1] (if round != 0, of course)
        # has a list of Download objects for each Download to this peer in
        # the previous round.
        if len(requests) == 0:
            logging.debug("No one wants my pieces!")
            chosen = []
            bws = []
        else:
            logging.debug("Still here: uploading to a random peer")
            ########################################################################
            # The dummy client picks a single peer at random to unchoke.           #
            # You should decide a set of peers to unchoke accoring to the protocol #
            ########################################################################
            request = random.choice(requests)
            chosen = [request.requester_id]
            # Now that we have chosen who to unchoke, the standard client evenly shares
            # its bandwidth among them
            bws = even_split(self.up_bw, len(chosen))
        # create actual uploads out of the list of peer ids and bandwidths
        # You don't need to change this
        uploads = [Upload(self.id, peer_id, bw)
                   for (peer_id, bw) in zip(chosen, bws)]
        return uploads
| [
"gbaranov99@gmail.com"
] | gbaranov99@gmail.com |
2534efd7cf1a472d4c24db7e37fb628ef53a3a0f | 9adda6cef38c05c0d6bc4f5d0be25e75500f3406 | /ques 2 sol.py | 00f2329450eb86ff204e44c7f8653fbee1abdcff | [] | no_license | GLAU-TND/python-programming-assignment4-upadhyay8844 | 09255dd1ef340f7af3ee57e4eee3c671c010d5c4 | bc5c31d40f03cceebb2c842bdd933e0e73a998a1 | refs/heads/master | 2021-05-19T05:26:14.857261 | 2020-04-01T11:43:27 | 2020-04-01T11:43:27 | 251,547,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | def is_dict(var):
return str(type(var)) == "<class 'dict'>"
def flatten_helper(d, flat_d, path):
if not is_dict(d):
flat_d[path] = d
return
for key in d:
new_keypath = "{}.{}".format(path, key) if path else key
flatten_helper(d[key], flat_d, new_keypath)
def flatten(d):
flat_d = dict()
flatten_helper(d, flat_d, "")
return flat_d
| [
"noreply@github.com"
] | GLAU-TND.noreply@github.com |
2bd8ed87c34f5106c1bf3d36425d3ecae107c5ea | 43f5332bfc67e67ddb1e52e7eae40306ce7ef1e2 | /12/tests.py | a072532fc33103350a0c1c5c0b21149b8b84a7d9 | [
"MIT"
] | permissive | remihuguet/aoc2020 | 943d9713c5edf2a80aa9e11a46d89ad3ef72b88c | c313c5b425dda92d949fd9ca4f18ff66f452794f | refs/heads/main | 2023-04-13T05:07:31.317561 | 2021-03-30T20:36:48 | 2021-03-30T20:36:48 | 317,330,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,326 | py | import rainrisk
def test_compute_new_direction():
    """Every quarter-turn from the east (1,0) and north (0,1) headings
    rotates to the expected unit vector."""
    cases = [
        ((1, 0), 'R90', (0, -1)),
        ((1, 0), 'L90', (0, 1)),
        ((1, 0), 'R180', (-1, 0)),
        ((1, 0), 'L180', (-1, 0)),
        ((1, 0), 'R270', (0, 1)),
        ((1, 0), 'L270', (0, -1)),
        ((1, 0), 'R360', (1, 0)),
        ((1, 0), 'L360', (1, 0)),
        ((0, 1), 'R90', (1, 0)),
        ((0, 1), 'L90', (-1, 0)),
        ((0, 1), 'R180', (0, -1)),
        ((0, 1), 'L180', (0, -1)),
        ((0, 1), 'R270', (-1, 0)),
        ((0, 1), 'L270', (1, 0)),
        ((0, 1), 'R360', (0, 1)),
        ((0, 1), 'L360', (0, 1)),
    ]
    for direction, turn, expected in cases:
        assert rainrisk.compute_new_direction(direction, turn) == expected
def test_compute_final_position():
    """The example route from the puzzle ends at (17, -8)."""
    with open('12/test_input.txt') as handle:
        movements = handle.readlines()
    assert rainrisk.compute_final_position(movements) == (17, -8)
def test_compute_manhattan():
    """The example route ends 25 Manhattan units from the origin."""
    assert rainrisk.compute_manhattan('12/test_input.txt') == 25
def test_compute_position_waypoint():
    """With the waypoint starting at (10, 1), the example route ends at (214, -72)."""
    with open('12/test_input.txt') as handle:
        movements = handle.readlines()
    waypoint = (10, 1)
    assert rainrisk.compute_position_waypoint(movements, waypoint) == (214, -72)
def test_compute_final_manhattan():
    """Part-two rules give a final Manhattan distance of 286 for the example."""
    assert rainrisk.compute_final_manhattan('12/test_input.txt') == 286
def test_compute_speed_rotation():
    """Rotating the waypoint offset (10, 1) by each quarter turn gives the
    expected rotated offset."""
    cases = [
        ('R90', (1, -10)),
        ('L90', (-1, 10)),
        ('R180', (-10, -1)),
        ('L180', (-10, -1)),
        ('R270', (-1, 10)),
        ('L270', (1, -10)),
    ]
    for turn, expected in cases:
        assert rainrisk.compute_speed_rotation((10, 1), turn) == expected
| [
"remi.huguet@gmail.com"
] | remi.huguet@gmail.com |
5c88190ae443b4fec8426fb3e97aa3b52ac51b19 | 3ac7e1ec8c3551b449e10c43c76e44d285462502 | /manage.py | 24c38809017765d996b7094cdc22bfd076324a4c | [] | no_license | slayyy/render-manager | 612ea2c2e611875df7b453a177401ecfeeb9962d | efbcf6f2b30061bfa14bb22994e0029e73691896 | refs/heads/master | 2022-05-08T23:45:14.461612 | 2019-10-27T17:19:52 | 2019-10-27T17:19:52 | 215,014,775 | 0 | 0 | null | 2022-04-22T22:33:52 | 2019-10-14T10:31:53 | Python | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django's command-line entry point: configure settings and dispatch argv."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "render_manager.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
# Standard script guard: only run the management CLI when executed directly.
if __name__ == "__main__":
    main()
| [
"decobert.a78@gmail.com"
] | decobert.a78@gmail.com |
3bad8e294dbb199e5e40870811281cceb64d295c | cd1465251c5b4536f8d0c634c0efaff4d628c87c | /Semantic_Analysers/ColourAnalyser.py | cfea4b8096683c0c4cb9bfcef3f1d8532d6f9037 | [] | no_license | BennyMurray/Scoop | 9794bca1d1d4b159e041a8c482a256de852a9e6d | 01f9c31affbb291a5507d86ab88a637a2dafea28 | refs/heads/master | 2021-03-27T18:54:21.094647 | 2017-07-16T21:25:24 | 2017-07-16T21:25:24 | 77,929,600 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | from __future__ import division
def analyseColour(word_list):
    """Map a list of beer-colour descriptor words to a single colour number.

    Each recognised descriptor contributes a fixed weight to its colour
    bucket; the bucket with the highest accumulated score wins (earlier
    buckets win ties, so an all-zero score returns 1). Note: matching is by
    substring containment in the descriptor, mirroring the original scorer.
    """
    weights = [0.33, 1.68, 2.2, 2.36, 2.87, 0.29, 0.27, 2.94, 0.81, 0.22, 2.59, 1.06]
    palette = [
        (1, ["pale"], weights[0]),
        (3, ["straw"], weights[1]),
        (4, ["yellow"], weights[2]),
        (5, ["gold"], weights[3]),
        (8, ["amber"], weights[4]),
        (12, ["red"], weights[5]),
        (16, ["copper"], weights[6]),
        (18, ["murky"], weights[7]),
        (21, ["brown"], weights[8]),
        (26, ["muddy"], weights[9]),
        (32, ["black"], weights[10]),
        (37, ["opaque"], weights[11]),
    ]
    # Score accumulator, keyed by colour number in palette order.
    scores = {colour: 0 for colour, _, _ in palette}
    for word in word_list:
        for colour, descriptors, weight in palette:
            for descriptor in descriptors:
                if word in descriptor:
                    scores[colour] += weight
    # max() returns the first maximum in insertion order, matching the
    # original stable reverse-sort-then-take-first behaviour.
    return max(scores.items(), key=lambda item: item[1])[0]
| [
"bjamurray@gmail.com"
] | bjamurray@gmail.com |
4c61a7aae73fa64897e0df01720f5f1eed93b6dd | 16de2efcba33961633c1e63e493986bad54c99bd | /test.py | 73b7e8d90f6b8b0378a1486d70f70ac2af704483 | [] | no_license | thakur-nishant/Algorithms | a0cc45de5393d4cbb428cccdbf81b6937cdf97d7 | 1a0306ca9a9fc68f59e28ea26c24822c15350294 | refs/heads/master | 2022-01-07T22:22:09.764193 | 2019-05-17T20:10:24 | 2019-05-17T20:10:24 | 109,093,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | from math import log
from random import random
import matplotlib.pyplot as plt
import numpy as np
# Simulate a homogeneous Poisson process with rate l over [0, T] hours by
# summing exponential inter-arrival times, then plot the counting process.
l = 2
T = 24
# Inverse-CDF sampling: -ln(U)/l is Exp(l)-distributed.
curr = -1/l * log(random())
arrival = [curr]
while curr < T:
    curr = curr -1/l * log(random())
    arrival.append(curr)
# NOTE(review): this discards the FIRST arrival time while keeping the last
# one (which exceeds T) — both look like off-by-one choices; confirm intent.
arrival = arrival[1:]
t = np.arange(0.0, T, 0.01)
N = len(t)
X = np.zeros(N)
for i in range(N):
    # Counting process N(t): number of arrivals at or before time t[i].
    X[i] = np.sum(arrival <= t[i])
plt.plot(t, X)
plt.xlabel('time(hrs)')
plt.show()
| [
"nt.nishantt@gmail.com"
] | nt.nishantt@gmail.com |
1d0f92dbe95dbd0d77497c536f85a8ee3cac119f | 7504e3c8400986b8d72227a9ae6d084a2eb9de09 | /acs/cli.py | a8ed40bfa7fe927c66f9bba4e722cfba7b0100ea | [
"Apache-2.0"
] | permissive | keikhara/acs-cli | 848b288e009591650e726fb7784e3acff5d0fc49 | b0d53679f7642c655bfb84ea3e7918b16af7dc25 | refs/heads/master | 2020-12-11T07:18:57.236131 | 2016-07-26T05:59:26 | 2016-07-26T05:59:26 | 65,325,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | """
acs
Usage:
acs [--config-file=<file>] [--version] [--help] <command> [<args>...]
Options:
-h --help Show this help.
Commands:
service Create and manage Azure Container Service
docker Send docker commands to the cluster
afs Add the Azure Files Docker volume driver to each agent
oms Add or configure Operational Management Suite monitoring
See `acs <command> --help` for information on a specific command.
Help:
For help using this tool please open an issue on the GitHub repository:
https://github.com/rgardler/acs-scripts
"""
from . import __version__ as VERSION
from acs.commands.base import Config
from docopt import docopt
from inspect import getmembers, isclass
import os.path
import sys
def main():
    """Main CLI entrypoint.

    Dispatches to the command class whose name matches the requested
    sub-command, e.g. ``acs service ...`` -> ``commands.service.Service``.
    """
    # Alias the module so the loop variable below cannot shadow it; the
    # original reassigned the name ``commands`` to the getmembers() result.
    from . import commands as command_modules
    args = docopt(__doc__, version=VERSION, options_first=True)
    config = Config(args['--config-file'])

    command_name = args["<command>"]
    argv = args['<args>']

    # Fail early with the intended message when no module exists for the
    # command; a plain getattr() raised an unhelpful AttributeError here.
    module = getattr(command_modules, command_name, None)
    if module is None:
        raise Exception("Unrecognized command: " + command_name)

    command = None
    for _, command_class in getmembers(module, isclass):
        if command_name.lower() in command_class.__name__.lower():
            command = command_class(config, argv)
            # Stop at the first match: the original kept iterating,
            # instantiating every matching class and keeping only the last.
            break

    if command is None:
        raise Exception("Unrecognized command: " + command_name)

    command.run()
| [
"ross@gardler.org"
] | ross@gardler.org |
6dcdc505bde9ee4996f62e4c6d83879c0f9d77ab | 1d4cdffde9e2cf750b0fbe0cc06f4f4455393762 | /Chapter09/Python/02-geocoding-parallel-with-python.py | ad3faa246f40c105739ee455cf75e592df013813 | [
"MIT"
] | permissive | ndarvishev/Extending-Power-BI-with-Python-and-R | 59b3a82bb7bf863756c71d8bff11c41ad9164fe3 | d52b7064128ff086c315335d780df3dde1fb13d5 | refs/heads/main | 2023-07-20T07:27:22.377184 | 2021-09-05T14:29:09 | 2021-09-05T14:29:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,343 | py | # %%
import os
import requests
import urllib
import json
import pandas as pd
import dask.dataframe as dd
import time
# %%
def bing_geocode_via_address(address):
    """Geocode one free-form address with the Bing Maps Locations API.

    Relies on the module globals ``base_url`` and ``AUTH_KEY``. Returns the
    tuple (num_resources, formattedAddress, lat, lng, text, status, url);
    on any response-parsing failure the first four fields are
    (0, None, None, None) while the raw response text/status/url are still
    returned for diagnosis.
    """
    # trim the string from leading and trailing spaces using strip
    full_url = f"{base_url}query={urllib.parse.quote(address.strip(), safe='')}?key={AUTH_KEY}"
    r = requests.get(full_url)
    try:
        data = r.json()
        # number of resources found, used as index to get the
        # latest resource
        num_resources = data['resourceSets'][0]['estimatedTotal']
        # Hoist the repeated deep indexing into a single lookup.
        resource = data['resourceSets'][0]['resources'][num_resources-1]
        formattedAddress = resource['address']['formattedAddress']
        lat = resource['point']['coordinates'][0]
        lng = resource['point']['coordinates'][1]
    except (ValueError, LookupError, TypeError):
        # ValueError: body was not JSON; LookupError/TypeError: the payload
        # carried no usable resource. The original bare ``except:`` also
        # swallowed KeyboardInterrupt and SystemExit.
        num_resources = 0
        formattedAddress = None
        lat = None
        lng = None
    text = r.text
    status = r.reason
    url = r.url
    return num_resources, formattedAddress, lat, lng, text, status, url
def enrich_with_geocoding(passed_row, col_name):
    """Return *passed_row* augmented with Bing geocoding result columns for
    the address stored in the *col_name* column."""
    # Fixed waiting time to avoid the "Too many requests" error, as basic
    # accounts are limited to 5 queries per second.
    time.sleep(3)
    address_value = str(passed_row[col_name])
    geocode_columns = ('numResources', 'formattedAddress', 'latitude',
                       'longitude', 'text', 'status', 'url')
    # bing_geocode_via_address returns its fields in exactly this order.
    for column, value in zip(geocode_columns,
                             bing_geocode_via_address(address_value)):
        passed_row[column] = value
    return passed_row
# %%
####################################################################################################
# To be set up separately for security reasons
####################################################################################################
os.environ['BINGMAPS_API_KEY'] = '<your-api-key>'
####################################################################################################
base_url= "http://dev.virtualearth.net/REST/v1/Locations/"
AUTH_KEY = os.environ.get('BINGMAPS_API_KEY')
# %%
ddf_orig = dd.read_csv(r'D:\LZavarella\OneDrive\MVP\PacktBook\Code\Extending-Power-BI-with-Python-and-R\Chapter09\geocoding_test_data.csv',
encoding='latin-1')
ddf = ddf_orig[['full_address','lat_true','lon_true']]
ddf.npartitions
# %%
ddf = ddf.repartition(npartitions=os.cpu_count()*2)
ddf.npartitions
# %%
enriched_ddf = ddf.apply(enrich_with_geocoding, axis=1, col_name='full_address',
meta={'full_address': 'string', 'lat_true': 'float64', 'lon_true': 'float64',
'numResources': 'int32', 'formattedAddress': 'string',
'latitude': 'float64', 'longitude': 'float64', 'text': 'string',
'status': 'string', 'url': 'string'})
tic = time.perf_counter()
enriched_df = enriched_ddf.compute()
toc = time.perf_counter()
print(f'{enriched_df.shape[0]} addresses geocoded in {toc - tic:0.4f} seconds')
# %%
enriched_df
# %%
| [
"lucazavarella@outlook.com"
] | lucazavarella@outlook.com |
a4b5aa67b31a6384126ccb1f35d8fff5774c6f6b | bf6626d2f70ef72963eb6290bdcf44dc75531aa1 | /hoodwatch/settings.py | 77ae46e54251375fc08635b55d0fde36cbd22c3e | [
"MIT"
] | permissive | amtesire/Hood-project | c70930f110668a9549809f329543e62d9c036750 | 8078a2c85ba06cdddab54a4960168fb2e237e122 | refs/heads/master | 2023-02-23T19:50:39.233884 | 2021-02-02T14:53:28 | 2021-02-02T14:53:28 | 334,227,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,411 | py | """
Django settings for hoodwatch project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import django_heroku
import dj_database_url
from decouple import config,Csv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'hood',
'pyuploadcare.dj',
]
# Credentials for the pyuploadcare file-upload integration.
# NOTE(review): these keys are hard-coded in source control while every
# other secret in this file is read via config() -- move them to environment
# variables as well and rotate the exposed keys.
UPLOADCARE = {
    'pub_key': '2b709bca64245dd9e55e',
    'secret': '0a60851de5f3db2dc728',
}
# django-crispy-forms renders forms with Bootstrap 4 markup.
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hoodwatch.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hoodwatch.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
MODE=config("MODE", default="dev")
# development
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Kigali'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# configuring the location for media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_REDIRECT_URL = 'index'
LOGOUT_REDIRECT_URL = 'index'
# Configure Django App for Heroku.
django_heroku.settings(locals())
| [
"tesiregisele@gmail.com"
] | tesiregisele@gmail.com |
c5c0efac5b659bbee446fae9d8a327987f1d99ea | 222ffb1996699dc5e0cb5e1c239698b872003c8d | /03_Multiply.py | 25ef5ced75ea4642a8dfeba6f61b0335756cb142 | [] | no_license | AdityaSA99/IoT-Programs | 3dcbf8ff24a978b530b0ada02a2b7ad5ed378462 | 20c5e910e20529bff648fa5523296f8712c6faeb | refs/heads/master | 2020-05-31T06:27:28.901576 | 2019-06-10T15:40:43 | 2019-06-10T15:40:43 | 190,142,164 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | a = float(input("Enter value of A :"))
b = float(input("Enter value of B :"))
c = a*b
print(("Product of {0} and {1} is {2}").format(a,b,c))
| [
"noreply@github.com"
] | AdityaSA99.noreply@github.com |
d27b115ccb6e7ef16a5a8dc0e95cc0d084f526b3 | 1d9d6f72b10dd34fd501b8ba58b1b8bfcb2ebb72 | /spraying/detect_blobs.py | 0efb6f21908c7287a1b7ba03fbcdda00698467ab | [] | no_license | crushendo/spraying | eb684ae0a1691e39f74978c2556a000e394920c2 | 93644410a17351ee8ae85f66fe8622887670f2c3 | refs/heads/master | 2021-01-19T22:46:57.663738 | 2017-08-11T19:11:00 | 2017-08-11T19:11:00 | 88,867,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,690 | py | import cv2
import numpy as np
class blob_detector():
    """Detect and classify spray droplets on a scanned card with OpenCV.

    Blob convexity separates isolated droplets from merged ones:
    singles use 0.93-1.0, doubles 0.90-0.92, triples 0.85-0.89, and
    ``all_drops`` (0.85-1.0) covers every detection. All detection methods
    share the same area/circularity/inertia thresholds (see ``_detect``).
    """

    def main(self):
        """Run the full pipeline: load the image, detect each droplet
        class, and display the annotated result."""
        blobs = blob_detector()
        img, params = blobs.initialize()
        all_keypoints = blobs.all_drops(img, params)
        single_keypoints, diameter_list = blobs.single_drops(img, params)
        double_keypoints = blobs.double_drops(img, params)
        triple_keypoints = blobs.triple_drops(img, params)
        blobs.output(img, all_keypoints, single_keypoints, double_keypoints, triple_keypoints)

    def initialize(self):
        """Load the sample scan and a fresh blob-detector parameter set."""
        #img = cv2.imread("thresholded_paper_5.png")
        img = cv2.imread("spray_paper_large.tiff")
        params = cv2.SimpleBlobDetector_Params()
        return img, params

    def _make_detector(self, params):
        """Build a SimpleBlobDetector on both OpenCV 2.x and 3.x+.

        cv2.SimpleBlobDetector (2.x) was renamed to
        cv2.SimpleBlobDetector_create in OpenCV 3; the original code only
        supported the 2.x spelling.
        """
        if hasattr(cv2, 'SimpleBlobDetector_create'):
            return cv2.SimpleBlobDetector_create(params)
        return cv2.SimpleBlobDetector(params)

    def _detect(self, img, params, min_convexity, max_convexity):
        """Detect blobs using the shared parameter set.

        The four public detection methods previously duplicated this whole
        setup and differed only in the convexity window.
        """
        params.filterByArea = True
        params.minArea = 10
        params.filterByCircularity = True
        params.minCircularity = 0
        params.filterByInertia = True
        params.minInertiaRatio = 0.2
        params.filterByConvexity = True
        params.minConvexity = min_convexity
        params.maxConvexity = max_convexity
        detector = self._make_detector(params)
        return detector.detect(img)

    # ----------------
    # Single Droplets
    # ----------------
    def single_drops(self, img, params):
        """Detect isolated droplets (convexity 0.93-1.0).

        Returns (keypoints, diameter_list). diameter_list is currently
        always empty: the original conversion to physical diameters was
        commented out pending a ppi calibration value --
            area = (kp.size / ppi) ** 2 * 3.14159 * 0.25
            drop_d = area ** 0.455 * 1.06
        """
        single_keypoints = self._detect(img, params, 0.93, 1)
        diameter_list = []
        # print() with a single argument is valid on both Python 2 and 3;
        # the original ``print "..."`` statements were Python 2 only.
        print("Single drops: " + str(len(single_keypoints)))
        return single_keypoints, diameter_list

    # ----------------
    # Double Droplets
    # ----------------
    def double_drops(self, img, params):
        """Detect two merged droplets (convexity 0.90-0.92)."""
        double_keypoints = self._detect(img, params, 0.90, 0.92)
        print("Double drops: " + str(len(double_keypoints)))
        return double_keypoints

    # ----------------
    # Triple Droplets
    # ----------------
    def triple_drops(self, img, params):
        """Detect three merged droplets (convexity 0.85-0.89)."""
        triple_keypoints = self._detect(img, params, 0.85, 0.89)
        print("Triple drops: " + str(len(triple_keypoints)))
        return triple_keypoints

    # -------------
    # All Droplets
    # -------------
    def all_drops(self, img, params):
        """Detect every droplet (convexity 0.85-1.0)."""
        all_keypoints = self._detect(img, params, 0.85, 1)
        if all_keypoints:
            # The original indexed [0] unconditionally and crashed with
            # IndexError when no blobs were found.
            print("Size: " + str(all_keypoints[0].size))
        print("All drops: " + str(len(all_keypoints)))
        return all_keypoints

    def stats(self, diameter_list):
        """Return (number_median, volume_median) of the droplet diameters.

        Fixes the original implementation, which called ``list.sort()``
        (returns None) and then indexed the None result, and whose
        cumulative-area loop could run past the end of the list.

        Returns (None, None) for an empty list.
        NOTE(review): the per-drop "area" d * pi / 4 is kept from the
        original; true circular area would use d**2 -- confirm the intended
        units before relying on the volume median.
        """
        if not diameter_list:
            return None, None
        sorted_list = sorted(diameter_list, key=float)
        list_length = len(sorted_list)
        index = (list_length - 1) // 2
        if list_length % 2:
            num_median = sorted_list[index]
        else:
            num_median = (sorted_list[index] + sorted_list[index + 1]) / 2.0
        # Volume median (DV50): smallest diameter at which the cumulative
        # area reaches half of the total area.
        total_area = sum(d * 3.14159 * 0.25 for d in sorted_list)
        current_area = 0.0
        vol_median = sorted_list[-1]
        for d in sorted_list:
            current_area += d * 3.14159 * 0.25
            if current_area >= total_area / 2.0:
                vol_median = d
                break
        return num_median, vol_median

    def output(self, img, all_keypoints, single_keypoints, double_keypoints, triple_keypoints):
        """Display the source image with detected droplets circled.

        DRAW_RICH_KEYPOINTS scales each drawn circle with the blob size.
        """
        im_with_keypoints = cv2.drawKeypoints(img, all_keypoints, np.array([]), (255,255,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        #im_with_keypoints = cv2.drawKeypoints(im_with_keypoints, single_keypoints, np.array([]), (0, 0, 255))
        #im_with_keypoints = cv2.drawKeypoints(im_with_keypoints, double_keypoints, np.array([]), (0, 255, 0))
        #im_with_keypoints = cv2.drawKeypoints(im_with_keypoints, triple_keypoints, np.array([]), (255, 0, 0))
        cv2.imshow("im_with_keypoints", im_with_keypoints)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
if __name__ == "__main__":
blobs = blob_detector()
blobs.main()
| [
"ryan.ackett@gmail.com"
] | ryan.ackett@gmail.com |
4481446207abde7e6ba8f0c16de738b7d78e0e02 | 2a7b79c98aa6f8b36a68c96937cd8f4577ff48be | /neural_network/californiaHousingNeuralNet1.py | 2a8956b56cc13ec401ebb1d7395c90045c79c2f1 | [] | no_license | Utlak88/California-Housing-Dataset | 6935a207e465d48e3fcef2930d8e65bc7d9c4c99 | 6aae815d323e39d041586fdc54da6c7f83809995 | refs/heads/main | 2023-02-01T16:37:53.305730 | 2020-12-23T07:40:25 | 2020-12-23T07:40:25 | 323,824,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,676 | py | # Neural network based on the Google Machine Learning Crash Course
################################################################################
# Importing modules
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn import metrics
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
print("Imported modules.")
################################################################################
################################################################################
# Defining functions for model as well as data visualizations
def plot_the_loss_curve(epochs, mse):
    """Visualize how the training loss (mean squared error) evolves
    across epochs."""
    plt.figure()
    plt.plot(epochs, mse, label="Loss")
    plt.xlabel("Epoch")
    plt.ylabel("Mean Squared Error")
    plt.legend()
    # Pad the y-range slightly so the curve does not touch the frame.
    lower, upper = mse.min() * 0.95, mse.max() * 1.03
    plt.ylim([lower, upper])
    plt.show()
print("Defined function to generate curve of loss vs epoch.")
def plot_r_squared_comparison(y_test, y_predict, title):
    """Produce an R-squared scatter plot of predicted vs. actual values.

    Args:
        y_test: ground-truth label values.
        y_predict: model predictions aligned with y_test.
        title: plot title.
    """
    # r2_score expects (y_true, y_pred); the original passed the arguments
    # swapped, which changes the reported value because R^2 is not
    # symmetric in its inputs.
    r_squared = metrics.r2_score(y_test, y_predict)
    plt.scatter(y_test, y_predict)
    plt.xlabel("Normalized Actual Values")
    plt.ylabel("Normalized Predicted Values")
    plt.title(title)
    # Best-fit (degree-1) line through the scatter.
    plt.plot(
        np.unique(y_test),
        np.poly1d(np.polyfit(y_test, y_predict, 1))(np.unique(y_test)),
    )
    # Place the R^2 annotation left of center and above the bulk of points.
    x_r2_label_placement = pd.Series(y_test).median() - 1.2 * pd.Series(y_test).std()
    y_r2_label_placement = (
        pd.Series(y_predict).median() + 3 * pd.Series(y_predict).std()
    )
    plt.text(
        x_r2_label_placement,
        y_r2_label_placement,
        "R-squared = {0:.2f}".format(r_squared),
    )
    plt.show()
print("Defined function to generate R-squared plot.")
def create_model(my_learning_rate, my_feature_layer):
    """Create and compile a regression network: the feature layer, two
    L2-regularised ReLU hidden layers, and a single-unit output."""
    hidden_specs = [(20, "Hidden1"), (12, "Hidden2")]

    model = tf.keras.models.Sequential()
    model.add(my_feature_layer)
    for units, layer_name in hidden_specs:
        model.add(
            tf.keras.layers.Dense(
                units=units,
                activation="relu",
                kernel_regularizer=tf.keras.regularizers.l2(0.04),
                name=layer_name,
            )
        )
    model.add(tf.keras.layers.Dense(units=1, name="Output"))

    model.compile(
        optimizer=tf.keras.optimizers.Adam(lr=my_learning_rate),
        loss="mean_squared_error",
        metrics=[tf.keras.metrics.MeanSquaredError()],
    )
    return model
def train_model(model, dataset, epochs, label_name, batch_size=None):
    """Fit *model* on *dataset* and return (epochs, mse_per_epoch).

    The column named *label_name* is split off as the label; every other
    column becomes an entry in the feature dict fed to model.fit.
    """
    feature_dict = {column: np.array(values) for column, values in dataset.items()}
    labels = np.array(feature_dict.pop(label_name))

    history = model.fit(
        x=feature_dict, y=labels, batch_size=batch_size, epochs=epochs, shuffle=True
    )

    # Snapshot of the model's mean squared error after each epoch.
    mse_per_epoch = pd.DataFrame(history.history)["mean_squared_error"]
    return history.epoch, mse_per_epoch
print("Defined the create_model and train_model functions.")
################################################################################
################################################################################
# Adjusting the granularity of reporting.
pd.options.display.max_rows = 10
pd.options.display.float_format = "{0:1.3f}".format
################################################################################
################################################################################
# Importing data
train_data = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv"
)
# shuffle the examples
train_data = train_data.reindex(np.random.permutation(train_data.index))
test_data = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/california_housing_test.csv"
)
print("Imported data.")
################################################################################
################################################################################
# TRAIN FEATURE ENGINEERING (PART 1)
# Defining variable for use to assign column values to column variables
# data = train_data
# # Initially defining column variables
# (
# longitude,
# latitude,
# housing_median_age,
# total_rooms,
# total_bedrooms,
# population,
# households,
# median_income,
# median_house_value,
# ) = range(0, len(data.columns))
# # Assigning column values to column variables
# dict_for_columns = {}
# for x in range(0, len(data.columns)):
# dict_for_columns[data.columns[x]] = data[data.columns[x]]
# # Defining column variables for use in data analysis
# globals().update(dict_for_columns)
# # Visualizing data
# # train_data.hist(figsize=[20,13])
# # train_data.boxplot(figsize=[20,13])
# # train_data.drop('median_house_value',axis=1).boxplot(figsize=[20,13])
# # Clipping outliers
# total_rooms[total_rooms > 6000] = 6000
# train_data[train_data.columns[3]] = total_rooms
# total_bedrooms[total_bedrooms > 1300] = 1300
# train_data[train_data.columns[4]] = total_bedrooms
# population[population > 3000] = 3000
# train_data[train_data.columns[5]] = population
# households[households > 1250] = 1250
# train_data[train_data.columns[6]] = households
# median_income[median_income > 8.5] = 8.5
# train_data[train_data.columns[7]] = median_income
# print("Clipped train features.")
# Z-Score Normalizing
# columns_for_normalizing = train_data[train_data.columns[0:9]]
# normalized_columns = (
# columns_for_normalizing - columns_for_normalizing.mean()
# ) / columns_for_normalizing.std()
# train_data[normalized_columns.columns] = normalized_columns
# print("Normalized train features.")
# # Revisualizing data
# # train_data.hist(figsize=[20,13])
# # train_data.drop('median_house_value',axis=1).boxplot(figsize=[20,13])
# # Adding new feature calculating the ratio of total bedrooms to total rooms
# train_data["rooms_ratio"] = train_data["total_bedrooms"] / train_data["total_rooms"]
# print("Added new train data feature calculating the ratio of total bedrooms to total rooms.")
################################################################################
################################################################################
# TEST FEATURE ENGINEERING (PART 1)
# Defining variable for use to assign column values to column variables
# data = test_data
# # Initially defining column variables
# (
# longitude,
# latitude,
# housing_median_age,
# total_rooms,
# total_bedrooms,
# population,
# households,
# median_income,
# median_house_value,
# ) = range(0, len(data.columns))
# # Assigning column values to column variables
# dict_for_columns = {}
# for x in range(0, len(data.columns)):
# dict_for_columns[data.columns[x]] = data[data.columns[x]]
# # Defining column variables for use in data analysis
# globals().update(dict_for_columns)
# # Visualizing data
# # test_data.hist(figsize=[20,13])
# # test_data.boxplot(figsize=[20,13])
# # test_data.drop('median_house_value',axis=1).boxplot(figsize=[20,13])
# # Clipping outliers
# total_rooms[total_rooms > 6000] = 6000
# test_data[test_data.columns[3]] = total_rooms
# total_bedrooms[total_bedrooms > 1300] = 1300
# test_data[test_data.columns[4]] = total_bedrooms
# population[population > 3000] = 3000
# test_data[test_data.columns[5]] = population
# households[households > 1250] = 1250
# test_data[test_data.columns[6]] = households
# median_income[median_income > 8.5] = 8.5
# test_data[test_data.columns[7]] = median_income
# print("Clipped test features.")
# Z-Score Normalizing
# columns_for_normalizing = test_data[test_data.columns[0:9]]
# normalized_columns = (
# columns_for_normalizing - columns_for_normalizing.mean()
# ) / columns_for_normalizing.std()
# test_data[normalized_columns.columns] = normalized_columns
# print("Normalized test features.")
# # Revisualizing data
# # test_data.hist(figsize=[20,13])
# # test_data.drop('median_house_value',axis=1).boxplot(figsize=[20,13])
# # Adding new feature calculating the ratio of total bedrooms to total rooms
# test_data["rooms_ratio"] = test_data["total_bedrooms"] / test_data["total_rooms"]
# print("Added new test data feature calculating the ratio of total bedrooms to total rooms.")
################################################################################
################################################################################
# FEATURE ENGINEERING (PART 2)
# Create an empty list that will eventually hold all created feature columns.
# feature_columns = []
# # Establishing resolution by Zs
# resolution_in_Zs = 0.3 # 3/10 of a standard deviation.
# # Create a bucket feature column for latitude.
# latitude_as_a_numeric_column = tf.feature_column.numeric_column("latitude")
# latitude_boundaries = list(
# np.arange(
# int(min(train_data["latitude"])),
# int(max(train_data["latitude"])),
# resolution_in_Zs,
# )
# )
# latitude = tf.feature_column.bucketized_column(
# latitude_as_a_numeric_column, latitude_boundaries
# )
# # Create a bucket feature column for longitude.
# longitude_as_a_numeric_column = tf.feature_column.numeric_column("longitude")
# longitude_boundaries = list(
# np.arange(
# int(min(train_data["longitude"])),
# int(max(train_data["longitude"])),
# resolution_in_Zs,
# )
# )
# longitude = tf.feature_column.bucketized_column(
# longitude_as_a_numeric_column, longitude_boundaries
# )
# # Create a feature cross of latitude and longitude.
# latitude_x_longitude = tf.feature_column.crossed_column(
# [latitude, longitude], hash_bucket_size=100
# )
# crossed_feature = tf.feature_column.indicator_column(latitude_x_longitude)
# feature_columns.append(crossed_feature)
# # Represent median_income as a floating-point value.
# median_income = tf.feature_column.numeric_column("median_income")
# feature_columns.append(median_income)
# # Represent population as a floating-point value.
# population = tf.feature_column.numeric_column("population")
# feature_columns.append(population)
# # Convert the list of feature columns into a layer that will later be fed into the model.
# my_feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
################################################################################
################################################################################
# TRAINING AND EVALUATING MODEL
# The following variables are the hyperparameters.
learning_rate = 0.005
epochs = 200
batch_size = 1000
label_name = "median_house_value"
# Establish model topography.
# NOTE(review): `my_feature_layer` is only defined inside the commented-out
# "FEATURE ENGINEERING (PART 2)" section above, so this line raises
# NameError as the file stands -- re-enable that section (or build the
# feature layer here) before running.
my_model = create_model(learning_rate, my_feature_layer)
# Train the model on the normalized training set.
# NOTE(review): the normalization code in Part 1 is also commented out, so
# train_data is currently raw -- confirm that is intended.
epochs, mse = train_model(my_model, train_data, epochs, label_name, batch_size)
plot_the_loss_curve(epochs, mse)
# Build the test feature dict and isolate the label column for evaluation.
test_features = {name: np.array(value) for name, value in test_data.items()}
test_label = np.array(test_features.pop(label_name))  # isolate the label
print("\n Evaluate the new model against the test set:")
my_model.evaluate(x=test_features, y=test_label, batch_size=batch_size)
################################################################################
################################################################################
# Predicting data using trained model
predicted_values = np.squeeze(my_model.predict(test_features))
print("Predicted data using model.")
################################################################################
################################################################################
# Plotting comparison of predicted to test data in form of R-squared plot
print("Generating R-squared plot to evaluate quality of model prediction of test data.")
plot_r_squared_comparison(
test_label,
predicted_values,
"California Median House Value Prediction Quality\nNo Feature Engineering",
)
################################################################################
| [
"noreply@github.com"
] | Utlak88.noreply@github.com |
a3dc231f3dbd0e2e1ef4dbdd546e09d37e950ff2 | f224fad50dbc182cda86291c83954607bbb60901 | /inference.py | ce98cbf4d15f6bc1e05363be1db9afeb1e519de5 | [] | no_license | Hongpeng1992/pytorch-commands | 7fd26202b7cf7d46a0ac8e1241336e8ca5dad30e | 5853625d9852e948c1ac337547f8078d048699a0 | refs/heads/master | 2020-05-04T15:38:26.704013 | 2019-02-07T07:04:01 | 2019-02-07T07:04:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,644 | py | import argparse
import io
import os
import csv
import time
import numpy as np
import pandas as pd
from collections import OrderedDict
from datetime import datetime
from dataset import CommandsDataset, get_labels
from models import model_factory
from utils import AverageMeter, get_outdir
import torch
import torch.autograd as autograd
import torch.nn
import torch.nn.functional as F
import torch.utils.data as data
import torchvision.utils
parser = argparse.ArgumentParser(description='Inference')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--model', default='resnet101', type=str, metavar='MODEL',
help='Name of model to train (default: "countception"')
parser.add_argument('--gp', default='avg', type=str, metavar='POOL',
help='Type of global pool, "avg", "max", "avgmax", "avgmaxc" (default: "avg")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('-b', '--batch-size', type=int, default=512, metavar='N',
help='input batch size for training (default: 512)')
parser.add_argument('-j', '--workers', type=int, default=2, metavar='N',
help='how many training processes to use (default: 1)')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to restore checkpoint (default: none)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--save-batches', action='store_true', default=False,
help='save images of batch inputs and targets every log interval for debugging/verification')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
def main():
    """Run inference over the test split and write prediction CSVs.

    Builds the model named by --model, restores weights from --checkpoint,
    iterates the test dataset, and writes two files into the output folder:
    results.csv (per-class log-probabilities per file) and submission.csv
    (top-1 label and probability per file).
    """
    args = parser.parse_args()
    num_classes = len(get_labels())
    test_time_pool = 0 #5 if 'dpn' in args.model else 0
    model = model_factory.create_model(
        args.model,
        in_chs=1,
        num_classes=num_classes,
        global_pool=args.gp,
        test_time_pool=test_time_pool)
    #model.reset_classifier(num_classes=num_classes)

    if args.num_gpu > 1:
        model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
    else:
        model.cuda()

    if not os.path.exists(args.checkpoint):
        print("=> no checkpoint found at '{}'".format(args.checkpoint))
        exit(1)
    print("=> loading checkpoint '{}'".format(args.checkpoint))
    checkpoint = torch.load(args.checkpoint)
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}' (epoch {})".format(args.checkpoint, checkpoint['epoch']))
    else:
        # Plain state-dict checkpoint without wrapper metadata.
        model.load_state_dict(checkpoint)

    # Derive an experiment name from the checkpoint path:
    # .../<exp_dir>/<file>.pth -> "<exp_dir>-<file>".
    csplit = os.path.normpath(args.checkpoint).split(sep=os.path.sep)
    if len(csplit) > 1:
        exp_name = csplit[-2] + '-' + csplit[-1].split('.')[0]
    else:
        exp_name = ''

    if args.output:
        output_base = args.output
    else:
        output_base = './output'
    output_dir = get_outdir(output_base, 'predictions', exp_name)

    dataset = CommandsDataset(
        root=args.data,
        mode='test',
        format='spectrogram'
    )

    loader = data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        pin_memory=True,
        shuffle=False,
        num_workers=args.workers
    )

    model.eval()

    batch_time_m = AverageMeter()
    data_time_m = AverageMeter()

    try:
        # Open both CSVs with distinct, managed handles. The original bound
        # the two file objects to the same variable `cf`, leaking the first
        # handle and never closing either; newline='' is required by the
        # csv module to avoid blank rows on Windows.
        with open(os.path.join(output_dir, 'results.csv'), mode='w', newline='') as res_file, \
                open(os.path.join(output_dir, 'submission.csv'), mode='w', newline='') as sub_file:
            res_writer = csv.writer(res_file)
            res_writer.writerow(['fname'] + dataset.id_to_label)
            sub_writer = csv.writer(sub_file)
            sub_writer.writerow(['fname', 'label', 'prob'])

            end = time.time()
            batch_sample_idx = 0
            # Inference only: skip autograd bookkeeping to save memory.
            with torch.no_grad():
                for batch_idx, (input, target) in enumerate(loader):
                    data_time_m.update(time.time() - end)
                    input = input.cuda()
                    output = model(input)

                    # Per-class log-probabilities feed results.csv; the
                    # softmax argmax feeds submission.csv.
                    output_logprob = F.log_softmax(output, dim=1).cpu().numpy()
                    output = F.softmax(output, dim=1)
                    output_prob, output_idx = output.max(1)
                    output_prob = output_prob.cpu().numpy()
                    output_idx = output_idx.cpu().numpy()

                    for i in range(output_logprob.shape[0]):
                        index = batch_sample_idx + i
                        pred_label = dataset.id_to_label[output_idx[i]]
                        pred_prob = output_prob[i]
                        filename = dataset.filename(index)
                        res_writer.writerow([filename] + list(output_logprob[i]))
                        sub_writer.writerow([filename] + [pred_label, pred_prob])

                    batch_sample_idx += input.size(0)
                    batch_time_m.update(time.time() - end)
                    if batch_idx % args.print_freq == 0:
                        print('Inference: [{}/{} ({:.0f}%)]  '
                              'Time: {batch_time.val:.3f}s, {rate:.3f}/s  '
                              '({batch_time.avg:.3f}s, {rate_avg:.3f}/s)  '
                              'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
                            batch_sample_idx, len(loader.sampler),
                            100. * batch_idx / len(loader),
                            batch_time=batch_time_m,
                            rate=input.size(0) / batch_time_m.val,
                            rate_avg=input.size(0) / batch_time_m.avg,
                            data_time=data_time_m))
                    end = time.time()
            # end iterating through dataset
    except KeyboardInterrupt:
        pass
    except Exception as e:
        # Best-effort: report the failure and fall through (kept from the
        # original behaviour rather than crashing mid-run).
        print(str(e))
if __name__ == '__main__':
main()
| [
"rwightman@gmail.com"
] | rwightman@gmail.com |
ad937e5bdb44e8c8d3bb32af90bd346163310b48 | b99ce2b1bc8ac32976cf1762c8df7bc74365e403 | /models/networks.py | c816a0edf200d1544f3b93f4aa93110c3d40a629 | [
"Apache-2.0"
] | permissive | KoryakovDmitry/TGRNet | 8a7b2ad34cc2cc481f90bd9f9b0745368cdcc705 | afef2835a8f3ff0d2f6573dda025e3115e0d3400 | refs/heads/main | 2023-08-04T02:29:29.361763 | 2021-10-05T15:03:12 | 2021-10-05T15:03:12 | 413,845,598 | 0 | 0 | Apache-2.0 | 2021-10-05T14:11:32 | 2021-10-05T14:11:31 | null | UTF-8 | Python | false | false | 29,516 | py | import torch
import torch.nn as nn
from torch_geometric.nn import GCNConv
from torch_geometric.data import Data as GraphData
from torch_geometric.data import Batch as GraphBatch
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
from torchvision import models
from torchvision import ops
from torchvision.ops import boxes as box_ops
import numpy as np
import cv2, os
from torchvision.models.detection.generalized_rcnn import GeneralizedRCNN
from torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork
from torchvision.ops import MultiScaleRoIAlign
from torchvision.models.detection.roi_heads import RoIHeads
from torchvision.models.detection.transform import GeneralizedRCNNTransform
from torchvision.models._utils import IntermediateLayerGetter
from torchvision.ops.feature_pyramid_network import FeaturePyramidNetwork
from torch.jit.annotations import Tuple, List, Dict, Optional
from collections import OrderedDict
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
    """No-op module that returns its input unchanged.

    Used as a stand-in when a normalization layer is disabled
    (see get_norm_layer with norm_type='none')."""
    def forward(self, x):
        # Pass-through; keeps nn.Sequential pipelines structurally uniform.
        return x
def get_norm_layer(norm_type='instance'):
    """Build a constructor for the requested normalization layer.

    Parameters:
        norm_type (str) -- one of: batch | instance | none

    BatchNorm is configured with learnable affine parameters and tracked
    running statistics; InstanceNorm with neither. 'none' yields a factory
    producing a no-op Identity module.
    """
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    if norm_type == 'none':
        return lambda x: Identity()
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler.

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a
                              subclass of BaseOptions. opt.lr_policy is the
                              policy name: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.niter>
    epochs and linearly decay the rate to zero over the next <opt.niter_decay>
    epochs. For the other policies we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.

    Raises:
        NotImplementedError -- if opt.lr_policy is not a recognized policy.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            # Multiplicative factor: 1.0 during the first opt.niter epochs,
            # then linear decay to 0 over opt.niter_decay epochs.
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        # Bug fix: this error was previously *returned* (not raised), so an
        # unknown policy silently handed callers an exception object; the
        # printf-style arguments were also never interpolated into the message.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def init_weights(net, init_type='normal', init_gain=0.02):
    """Initialize network weights in place.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal

    'normal' is the scheme used in the original pix2pix/CycleGAN papers;
    xavier and kaiming might work better for some applications.
    """
    def init_func(m):
        cls_name = m.__class__.__name__
        has_weight = hasattr(m, 'weight')
        if has_weight and ('Conv' in cls_name or 'Linear' in cls_name):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm2d' in cls_name and m.affine:
            # BatchNorm's weight is a vector, not a matrix; only the normal
            # distribution applies here (centered at 1.0, not 0.0).
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)
    print('initialize network with %s' % init_type)
    net.apply(init_func)
def init_net(net, use_distributed, gpu_id, no_init=False, init_type='normal', init_gain=0.02):
    """Register a network on its device and (optionally) initialize its weights.

    Parameters:
        net (network)          -- the network to set up
        use_distributed (bool) -- move to CUDA and wrap in DistributedDataParallel
        gpu_id (int)           -- device id used by the DDP wrapper
        no_init (bool)         -- skip weight initialization (e.g. pretrained parts)
        init_type (str)        -- normal | xavier | kaiming | orthogonal
        init_gain (float)      -- scaling factor for normal, xavier and orthogonal

    Returns the (possibly DDP-wrapped) network.
    """
    if use_distributed:
        # Distributed path requires CUDA; each process owns one device.
        assert torch.cuda.is_available()
        net.to(torch.device('cuda'))
        net = torch.nn.parallel.DistributedDataParallel(net, device_ids=[gpu_id])
    if not no_init:
        init_weights(net, init_type, init_gain=init_gain)
    return net
def define_ResNet50(gpu_ids=[]):
    """Build an ImageNet-pretrained ResNet-50 wrapped by the project's
    ResNet50 module (defined elsewhere in this file) and move it to GPUs.

    Parameters:
        gpu_ids (int list) -- GPUs to run on; when non-empty the net is moved
                              to gpu_ids[0] and wrapped in DataParallel.

    NOTE(review): mutable default argument; harmless here because gpu_ids is
    only read, but callers should still prefer an explicit list.
    """
    net = models.resnet50(pretrained=True)
    #net = nn.Sequential(*list(net.children())[:-2])
    net = ResNet50(net)
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)
    return net
def resnet_fpn_backbone(backbone_name, pretrained, use_distributed, gpu_id, norm_layer=ops.misc.FrozenBatchNorm2d, trainable_layers=5):
    """Build a ResNet backbone with an FPN plus the feature-fusion head.

    Parameters:
        backbone_name (str)    -- torchvision resnet variant, e.g. 'resnet50'
        pretrained (bool)      -- load ImageNet weights for the backbone
        use_distributed (bool) -- forwarded to init_net for device setup
        gpu_id (int)           -- forwarded to init_net for the DDP wrapper
        norm_layer             -- normalization layer class for the resnet
                                  (frozen batch-norm by default)
        trainable_layers (int) -- number of resnet stages (from the top) that
                                  stay trainable, in [0, 5]

    Returns the device-registered FeatureFusionForFPN network.
    """
    # Bug fix: honor the caller-supplied norm_layer. It was previously
    # ignored and FrozenBatchNorm2d was always used; the default argument
    # keeps the old behavior for existing callers.
    backbone = models.resnet.__dict__[backbone_name](
        pretrained=pretrained,
        norm_layer=norm_layer)
    # select layers that wont be frozen
    assert trainable_layers <= 5 and trainable_layers >= 0
    layers_to_train = ['layer4', 'layer3', 'layer2', 'layer1', 'conv1'][:trainable_layers]
    # freeze layers
    for name, parameter in backbone.named_parameters():
        if all([not name.startswith(layer) for layer in layers_to_train]):
            parameter.requires_grad_(False)
    # Map resnet stage names to the FPN's expected ordered keys.
    return_layers = {'layer1': '0', 'layer2': '1', 'layer3': '2', 'layer4': '3'}
    #return_layers = {'layer2': '0', 'layer3': '1', 'layer4': '2'}
    in_channels_stage2 = backbone.inplanes // 8
    in_channels_list = [
        in_channels_stage2,
        in_channels_stage2 * 2,
        in_channels_stage2 * 4,
        in_channels_stage2 * 8,
    ]
    out_channels = 256
    net = BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels)
    net = FeatureFusionForFPN(net)
    ## initialize the FeatureFusion layers only (the pretrained backbone is skipped below)
    def init_func(m):  # define the initialization function
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            init.normal_(m.weight.data, 0.0, 0.02)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:  # BatchNorm weight is a vector; only a normal draw applies.
            init.normal_(m.weight.data, 1.0, 0.02)
            init.constant_(m.bias.data, 0.0)
    for submodule in net.children():
        if submodule.__class__.__name__ != "BackboneWithFPN":
            submodule.apply(init_func)
    # no_init=True: initialization was already done selectively above.
    return init_net(net, use_distributed, gpu_id, no_init=True)
def cell_seg_head(use_distributed, gpu_id):
    """Construct the cell bounding-box segmentation head and initialize it."""
    return init_net(Cell_Bbox_Seg(), use_distributed, gpu_id)
def cell_loc_head(rows_classes, cols_classes, img_h, img_w, alpha, device, use_distributed, gpu_id):
    """Construct the cell logical-location prediction head and initialize it."""
    head = Cell_Lloc_Pre(rows_classes, cols_classes, img_h, img_w, alpha, device)
    return init_net(head, use_distributed, gpu_id)
##############################################################################
# Classes
##############################################################################
class OrdinalRegressionLoss(nn.Module):
    """Ordinal regression loss with focal-style modulation.

    A K-class ordinal target is decomposed into K-1 binary "label > k"
    decisions; cross-entropy is taken over each, with easy decisions
    down-weighted by the focal factor *gamma*.

    NOTE(review): overriding __call__ (instead of forward) bypasses
    nn.Module hooks — presumably intentional; confirm.
    """
    def __init__(self, num_class, gamma=None):
        """
        num_class -- number of ordinal classes K (yields K-1 binary thresholds)
        gamma     -- focal focusing factor (converted to a float32 tensor)
        """
        super(OrdinalRegressionLoss, self).__init__()
        self.num_class = num_class
        self.gamma = torch.as_tensor(gamma, dtype=torch.float32)
    def _create_ordinal_label(self, gt):
        # Per-threshold gamma, duplicated across the two binary channels.
        gamma_i = torch.ones(list(gt.shape)+[self.num_class-1])*self.gamma
        gamma_i = gamma_i.to(gt.device)
        gamma_i = torch.stack([gamma_i,gamma_i],-1)
        # ord_c0[..., k] = 1 while threshold k < gt, else 0.
        ord_c0 = torch.ones(list(gt.shape)+[self.num_class-1]).to(gt.device)
        mask = torch.zeros(list(gt.shape)+[self.num_class-1])+torch.linspace(0, self.num_class - 2, self.num_class - 1, requires_grad=False)
        mask = mask.contiguous().long().to(gt.device)
        mask = (mask >= gt.unsqueeze(len(gt.shape)))
        ord_c0[mask] = 0
        ord_c1 = 1-ord_c0
        # Stack into a one-hot pair per threshold: [..., num_class-1, 2].
        ord_label = torch.stack([ord_c0,ord_c1],-1)
        return ord_label.long(), gamma_i
    def __call__(self, prediction, target):
        """Return the mean focal ordinal cross-entropy.

        prediction -- logits; last dim is the binary channel (softmaxed here).
                      Assumed shaped like [..., 2, num_class-1, 2] to match
                      Cell_Lloc_Pre's reshape — TODO confirm with caller.
        target     -- integer ordinal labels with matching leading shape.
        """
        # original
        #ord_label = self._create_ordinal_label(target)
        #pred_score = F.log_softmax(prediction,dim=-1)
        #entropy = -pred_score * ord_label
        #entropy = entropy.view(-1,2,(self.num_class-1)*2)
        #loss = torch.sum(entropy, dim=-1).mean()
        # using nn.CrossEntropyLoss()
        #ord_label = self._create_ordinal_label(target)
        #criterion = nn.CrossEntropyLoss().to(ord_label.device)
        #loss = criterion(prediction, ord_label)
        # add focal
        ord_label, gamma_i = self._create_ordinal_label(target)
        pred_score = F.softmax(prediction,dim=-1)
        pred_logscore = F.log_softmax(prediction,dim=-1)
        # Focal cross-entropy: -(y) * (1-p)^gamma * log(p).
        entropy = -ord_label * torch.pow((1-pred_score), gamma_i) * pred_logscore
        entropy = entropy.view(-1,2,(self.num_class-1)*2)
        loss = torch.sum(entropy,dim=-1)
        return loss.mean()
class BackboneWithFPN(nn.Module):
    """ResNet body feeding a Feature Pyramid Network.

    Adapted from torchvision's backbone_utils, but deliberately omits the
    extra_blocks=LastLevelMaxPool() level in the FeaturePyramidNetwork.
    """
    def __init__(self, backbone, return_layers, in_channels_list, out_channels):
        super(BackboneWithFPN, self).__init__()
        # Expose the selected intermediate resnet stages as an ordered dict.
        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
        self.fpn = FeaturePyramidNetwork(
            in_channels_list=in_channels_list,
            out_channels=out_channels,
        )
        self.out_channels = out_channels
    def forward(self, x):
        """Run the backbone stages, then build the feature pyramid."""
        return self.fpn(self.body(x))
class FeatureFusionForFPN(nn.Module):
    """Fuse the four FPN pyramid levels into one high-resolution feature map.

    The deepest level is merged top-down into progressively finer levels;
    all four resulting maps are upsampled to the finest resolution,
    concatenated (4 x 256 = 1024 channels), and finally upsampled to the
    input image's spatial size.
    """
    def __init__(self, backbone):
        super(FeatureFusionForFPN, self).__init__()
        self.fpn_backbone = backbone
        # Per-level activations; BatchNorm deliberately disabled (kept commented).
        self.layer1_bn_relu = nn.Sequential(
            #nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
        self.layer2_bn_relu = nn.Sequential(
            #nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
        self.layer3_bn_relu = nn.Sequential(
            #nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
        self.layer4_bn_relu = nn.Sequential(
            #nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
        # 3x3 smoothing convs applied after each top-down merge.
        self.smooth1 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
        self.smooth2 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
        self.smooth3 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
    def _upsample(self, x, y, scale=1):
        # Resize x to y's spatial size (optionally shrunk by `scale`).
        _, _, H, W = y.size()
        #return F.upsample(x, size=(H // scale, W // scale), mode='bilinear')
        return nn.functional.interpolate(x, size=(H // scale, W // scale), mode='bilinear', align_corners=False)
    def _upsample_add(self, x, y):
        # Top-down merge: upsample x to y's size and add element-wise.
        _, _, H, W = y.size()
        #return F.upsample(x, size=(H, W), mode='bilinear') + y
        return nn.functional.interpolate(x, size=(H, W), mode='bilinear', align_corners=False) + y
    def forward(self, x):
        # Returns a [N, 1024, x_h, x_w] fused feature map.
        fpn_outputs = self.fpn_backbone(x)
        #print(fpn_outputs['0'].shape,fpn_outputs['1'].shape,fpn_outputs['2'].shape)
        # the output of a group of fpn feature:
        # [('0', torch.Size([1, 256, 128, 128])),
        # ('1', torch.Size([1, 256, 64, 64])),
        # ('2', torch.Size([1, 256, 32, 32])),
        # ('3', torch.Size([1, 256, 16, 16]))]
        layer1 = self.layer1_bn_relu(fpn_outputs['0'])
        layer2 = self.layer2_bn_relu(fpn_outputs['1'])
        layer3 = self.layer3_bn_relu(fpn_outputs['2'])
        layer4 = self.layer4_bn_relu(fpn_outputs['3'])
        # Top-down fusion: deepest level is upsampled, added, then smoothed.
        fusion4_3 = self.smooth1(self._upsample_add(layer4, layer3))
        fusion4_2 = self.smooth2(self._upsample_add(fusion4_3, layer2))
        fusion4_1 = self.smooth3(self._upsample_add(fusion4_2, layer1))
        # Bring every map to the finest resolution before concatenation.
        fusion4_2 = self._upsample(fusion4_2, fusion4_1)
        fusion4_3 = self._upsample(fusion4_3, fusion4_1)
        layer4 = self._upsample(layer4, fusion4_1)
        #fusion4_3 = self._upsample(fusion4_3, fusion4_2)
        #layer4 = self._upsample(layer4, fusion4_2)
        inter_feat = torch.cat((fusion4_1, fusion4_2, fusion4_3, layer4), 1) # [N, 1024, H, W]
        inter_feat = self._upsample(inter_feat, x) # [N, 1024, x_h, x_w]
        #inter_feat = torch.cat((fusion4_2, fusion4_3, layer4), 1) # [N, 1024, H, W]
        #inter_feat = self._upsample(inter_feat, x) # [N, 1024, x_h, x_w]
        return inter_feat
class Cell_Bbox_Seg(nn.Module):
    """Cell segmentation head.

    Predicts a row profile, a column profile, and a fused 2-D segmentation
    map from the 1024-channel fused FPN features, then extracts axis-aligned
    cell bounding boxes from the segmentation's "cell" class.
    """
    def __init__(self, in_channels = 1024, num_classes=3):
        super(Cell_Bbox_Seg, self).__init__()
        # Reduce fused FPN features (in_channels) to a 256-channel decode map.
        self.decode_out = nn.Sequential(
            nn.Conv2d(in_channels, 256, kernel_size=3, stride=1, padding=1),
            #nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
        # Row branch: vertical (3x1) convs over the width-averaged map.
        self.row_out = nn.Sequential(
            nn.Conv2d(256, 64, kernel_size=(3,1), stride=1, padding=(1,0)),
            #nn.BatchNorm2d(64),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=(3,1), stride=1, padding=(1,0)),
            #nn.BatchNorm2d(64),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, num_classes, kernel_size=1, stride=1),
            #nn.BatchNorm2d(num_classes),
            nn.LeakyReLU(inplace=True)
        )
        # Column branch: horizontal (1x3) convs over the height-averaged map.
        self.col_out = nn.Sequential(
            nn.Conv2d(256, 64, kernel_size=(1,3), stride=1, padding=(0,1)),
            #nn.BatchNorm2d(64),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=(1,3), stride=1, padding=(0,1)),
            #nn.BatchNorm2d(64),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, num_classes, kernel_size=1, stride=1),
            #nn.BatchNorm2d(num_classes),
            nn.LeakyReLU(inplace=True)
        )
        # Dense 2-D segmentation branch.
        self.twodim_out = nn.Sequential(
            nn.Conv2d(256, 64, kernel_size=3, stride=1, padding=1),
            #nn.BatchNorm2d(64),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            #nn.BatchNorm2d(64),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, num_classes, kernel_size=1, stride=1),
            #nn.BatchNorm2d(num_classes),
            nn.LeakyReLU(inplace=True)
        )
        # 1x1 conv fusing the 2-D map with the broadcast row/column maps.
        self.fusion = nn.Sequential(
            nn.Conv2d(num_classes*3, num_classes, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(inplace=True)
        )
    def postprocess(self, row_pred, col_pred, seg_pred, table_names=None):
        """Turn fused segmentation logits into per-image cell boxes.

        Class 1 is treated as "cell"; each external contour of the class-1
        mask becomes a box padded by 2px on every side. Returns a list (one
        per image) of float tensors [num_cells, 4] as (x1, y1, x2, y2).
        row_pred / col_pred are unused in the current code path.
        """
        #pred_mat = torch.argmax(row_pred,dim=1) * torch.argmax(col_pred,dim=1)
        pred_mat = torch.argmax(seg_pred,dim=1)
        pred_mat = pred_mat.data.cpu().int().numpy()
        # Clamp spurious labels to the highest valid class (2).
        pred_mat[np.where(pred_mat>2)] = 2
        pred_mask = np.where(pred_mat == 1, 255, 0).astype('uint8')
        #self.vis_seg(pred_mask, table_names, '/data/xuewenyuan/dev/tablerec/results/delet_vis')
        N, H, W = pred_mask.shape
        batch_bboxes = []
        for ind in range(N):
            # NOTE(review): cv2.findContours return order differs between
            # OpenCV 3.x and 4.x; index [0] assumes the contours-first API.
            contours = cv2.findContours(pred_mask[ind].copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
            bboxes = [[ct[:,:,0].min()-2, ct[:,:,1].min()-2, ct[:,:,0].max()+2, ct[:,:,1].max()+2] for ct in contours]
            bboxes = torch.as_tensor(bboxes).to(torch.float32)
            batch_bboxes.append(bboxes)
        return batch_bboxes
    def vis_seg(self, label_mat, table_names, vis_path):
        """Debug helper: dump per-pixel class maps as color PNGs under vis_path
        (class 0/1/2 -> [255,0,0]/[0,255,0]/[0,0,255] in OpenCV's BGR order)."""
        if not os.path.exists(vis_path):
            os.makedirs(vis_path)
        batch_size = len(table_names)
        for ind in range(batch_size):
            vis_mat = np.zeros((label_mat[ind].shape[0],label_mat[ind].shape[1],3),dtype=np.int32)
            vis_mat[np.where(label_mat[ind] == 0)] = np.array([255,0,0],dtype=np.int32)
            vis_mat[np.where(label_mat[ind] == 1)] = np.array([0,255,0],dtype=np.int32)
            vis_mat[np.where(label_mat[ind] == 2)] = np.array([0,0,255],dtype=np.int32)
            cv2.imwrite(os.path.join(vis_path,table_names[ind]+'_pred.png'), vis_mat.astype('uint8'))
    def forward(self, input):
        """Return (row_pred, col_pred, seg_pred, det_bboxes) for a [N,C,H,W] input."""
        decode_feat = self.decode_out(input)
        #decode_feat = nn.functional.interpolate(decode_feat, size=(src_img_shape[2], src_img_shape[3]), mode='bilinear', align_corners=False)
        seg_pred = self.twodim_out(decode_feat)
        # Average over width/height to obtain 1-D row/column profiles.
        row_pred = self.row_out(torch.mean(decode_feat, 3, True))
        col_pred = self.col_out(torch.mean(decode_feat, 2, True))
        # Broadcast the 1-D predictions back to the 2-D grid and fuse.
        row_expand = torch.repeat_interleave(row_pred, input.shape[3], dim = 3)
        col_expand = torch.repeat_interleave(col_pred, input.shape[2], dim = 2)
        seg_pred = self.fusion(torch.cat((seg_pred,row_expand,col_expand),1))
        #det_bboxes = self.postprocess(row_pred, col_pred, None)
        det_bboxes = self.postprocess(None, None, seg_pred)
        return row_pred, col_pred, seg_pred, det_bboxes
class Cell_Lloc_Pre(nn.Module):
    """Cell logical-location head.

    Builds a fully-connected graph over the cell boxes of each table image,
    embeds every cell from its geometry (box_emb) and ROI-aligned CNN
    features (cnn_emb), then runs two GCN branches — one with row-distance
    edge weights and one with column-distance weights — to predict each
    cell's ordinal row/column start and end indices.
    """
    def __init__(self, rows_classes, cols_classes, img_h, img_w, alpha, device,
        in_channels = 1024, cnn_emb_feat = 512, box_emb_feat = 256, gcn_out_feat = 512,
        cell_iou_thresh = 0.5, min_cells_percent = 1.0):
        super(Cell_Lloc_Pre, self).__init__()
        # IoU above which a detected box replaces its matched GT box in training.
        self.cell_iou_thresh = cell_iou_thresh
        # Lower bound on the fraction of remaining GT boxes kept for training.
        self.min_cells_percent = min_cells_percent
        self.img_h = img_h
        self.img_w = img_w
        self.device = device
        self.rows_classes = rows_classes
        self.cols_classes = cols_classes
        # Sharpness of the Gaussian edge-weight in edge_weight().
        self.alpha = alpha
        # Reduce fused FPN features to 256 channels before ROI alignment.
        self.decode_out = nn.Sequential(
            nn.Conv2d(in_channels, 256, kernel_size=3, stride=1, padding=1),
            #nn.BatchNorm2d(256,affine=False),
            nn.ReLU(inplace=True)
        )
        # Embed the flattened 2x2 ROI features of each cell.
        self.cnn_emb = nn.Sequential(
            nn.Linear(256*2*2, cnn_emb_feat),
            #nn.BatchNorm1d(cnn_emb_feat,affine=False),
            nn.ReLU(inplace=True)
        )
        # Embed the normalized box geometry (cx, cy, w, h).
        self.box_emb = nn.Sequential(
            nn.Linear(4, box_emb_feat),
            #nn.BatchNorm1d(box_emb_feat,affine=False),
            nn.ReLU(inplace=True)
        )
        self.gconv_row = GCNConv(cnn_emb_feat+box_emb_feat, gcn_out_feat)
        self.gconv_col = GCNConv(cnn_emb_feat+box_emb_feat, gcn_out_feat)
        # Ordinal classifiers: 2 (start/end) * (classes-1) thresholds * 2 logits.
        self.row_cls = nn.Sequential(
            nn.Linear(gcn_out_feat, 2*(rows_classes-1)*2),
            #nn.BatchNorm1d(2*(rows_classes-1)*2,affine=False),
            nn.LeakyReLU(inplace=True)
        )
        self.col_cls = nn.Sequential(
            nn.Linear(gcn_out_feat, 2*(cols_classes-1)*2),
            #nn.BatchNorm1d(2*(cols_classes-1)*2,affine=False),
            nn.LeakyReLU(inplace=True)
        )
    def get_box_feat(self, cell_boxes):
        """Normalized geometry features per box: (cx/W, cy/H, w/W, h/H)."""
        # roi_bboxes: List(Tensor(x1,y1,x2,y2))
        # image_shapes: [N,C,H,W]
        boxes = torch.cat(cell_boxes, dim=0)
        box_w = boxes[:,2]-boxes[:,0]
        box_h = boxes[:,3]-boxes[:,1]
        ctr_x = (boxes[:,2]+boxes[:,0])/2
        ctr_y = (boxes[:,3]+boxes[:,1])/2
        #rel_x = torch.log(ctr_x/self.img_w)
        #rel_y = torch.log(ctr_y/self.img_h)
        #rel_w = torch.log(box_w/self.img_w)
        #rel_h = torch.log(box_h/self.img_h)
        rel_x = ctr_x/self.img_w
        rel_y = ctr_y/self.img_h
        rel_w = box_w/self.img_w
        rel_h = box_h/self.img_h
        boxes_feat = torch.stack((rel_x,rel_y,rel_w,rel_h),dim=1)
        return boxes_feat
    def edge_weight(self, edge_ind, cell_boxes, im_scale, pdl, pdt):
        """Gaussian row/column affinities for every edge.

        Boxes are first mapped back to original-image coordinates by removing
        the left/top padding (pdl, pdt) and the resize scale (im_scale). The
        affinity decays with the vertical (row) / horizontal (column) distance
        between box centers, normalized by the table extent and sharpened by
        self.alpha.
        """
        assert cell_boxes.size(1) == 4
        assert edge_ind.size(1) == 2
        org_box = (cell_boxes - torch.stack((pdl,pdt)*2))/im_scale
        centr_x1 = (org_box[edge_ind[:,0],0] + org_box[edge_ind[:,0],2]) / 2
        centr_y1 = (org_box[edge_ind[:,0],1] + org_box[edge_ind[:,0],3]) / 2
        centr_x2 = (org_box[edge_ind[:,1],0] + org_box[edge_ind[:,1],2]) / 2
        centr_y2 = (org_box[edge_ind[:,1],1] + org_box[edge_ind[:,1],3]) / 2
        # Table extent approximated by the maximum box coordinate.
        tb_w = org_box[:,[0,2]].max()
        tb_h = org_box[:,[1,3]].max()
        row_attr = torch.exp(-(torch.square((centr_y1-centr_y2)*self.alpha/tb_h)))
        col_attr = torch.exp(-(torch.square((centr_x1-centr_x2)*self.alpha/tb_w)))
        return row_attr, col_attr
    def build_graph(self, cell_boxes, im_scales, pdls, pdts):
        """Build one fully-connected graph per image (no self-loops), attach
        the row/column edge affinities, and batch the graphs on self.device."""
        #device = roi_bboxes[0].device
        num_images = len(cell_boxes)
        graphs = []
        for img_id in range(num_images):
            edge_ind = []
            num_nodes = cell_boxes[img_id].shape[0]
            # Dense O(n^2) edges between all distinct node pairs.
            for n1 in range(num_nodes):
                for n2 in range(num_nodes):
                    if n1 == n2: continue
                    edge_ind.append([n1,n2])
            edge_ind = torch.as_tensor(edge_ind, dtype=torch.int64)
            #print(edge_ind.t())
            #edge_attr = self.edge_weight(edge_ind,cell_boxes[img_id], im_scales[img_id], pdls[img_id], pdts[img_id])
            row_attr, col_attr = self.edge_weight(edge_ind,cell_boxes[img_id], im_scales[img_id], pdls[img_id], pdts[img_id])
            #row_attr, col_attr, row_edge, col_edge = self.edge_weight(edge_ind,cell_boxes[img_id], im_scales[img_id], pdls[img_id], pdts[img_id])
            tb_graph = GraphData(edge_index=edge_ind.t(), num_nodes = num_nodes)
            tb_graph.row_attr = row_attr
            tb_graph.col_attr = col_attr
            #tb_graph.row_edge = row_edge.t()
            #tb_graph.col_edge = col_edge.t()
            graphs.append(tb_graph)
        graphs = GraphBatch.from_data_list(graphs).to(self.device)
        #print('graph')
        #print(graphs.edge_index, graphs.edge_attr)
        return graphs
    def filter_box(self, pred_boxes, gt_boxes):
        """Assemble training boxes by mixing GT boxes with good detections.

        For each image: GT boxes whose best-matching prediction exceeds
        cell_iou_thresh are replaced by that prediction; a random subset of
        the remaining GT boxes is kept (at least min_cells_percent of them).
        Returns (train_boxes, train_inds) where train_inds are flat indices
        into the concatenated GT ordering of the boxes that were kept.
        """
        batch_size = len(gt_boxes)
        train_boxes = []
        train_inds = []
        count = 0
        for b_ind in range(batch_size):
            if pred_boxes[b_ind].size(0) != 0:
                match_quality_matrix = box_ops.box_iou(pred_boxes[b_ind], gt_boxes[b_ind])
                # find best pred candidate for each gt
                matched_val, matched_ind = match_quality_matrix.max(dim=0)
                rm_gts = torch.where(matched_val>self.cell_iou_thresh)[0]
            else:
                rm_gts = torch.Tensor([])
            res_ind = torch.as_tensor([ i for i in range(gt_boxes[b_ind].size(0)) if (i not in rm_gts)], dtype=torch.int32)
            #res_gt_boxes = gt_boxes[b_ind][res_ind]
            num_preserved = ((torch.rand((1,))+self.min_cells_percent*10)/10*gt_boxes[b_ind].shape[0]).to(torch.int32) # [0.9 ~ 1)
            num_preserved = max(num_preserved - rm_gts.shape[0], 0)
            preserved_ind = torch.randperm(len(res_ind))[:num_preserved]
            #pred_ind = matches[rm_gts]
            #select_boxes = torch.cat((res_gt_boxes[preserved_ind],pred_boxes[b_ind][pred_ind]), dim=0)
            #train_boxes.append(select_boxes)
            boxes = []
            for box_i in range(gt_boxes[b_ind].size(0)):
                if box_i in res_ind[preserved_ind]:
                    boxes.append(gt_boxes[b_ind][box_i])
                    train_inds.append(count+box_i)
                elif box_i in rm_gts:
                    pred_ind = matched_ind[box_i]
                    boxes.append(pred_boxes[b_ind][pred_ind])
                    train_inds.append(count+box_i)
            train_boxes.append(torch.stack(boxes,dim=0))
            count += gt_boxes[b_ind].size(0)
        return train_boxes, train_inds
    def forward(self, input, pred_cell_boxes, im_scales, pdls, pdts, gt_cell_boxes=None):
        """Predict ordinal row/column logits for every cell box.

        Returns (cls_row_score, cls_col_score, train_inds): scores shaped
        [num_boxes, 2, classes-1, 2] (start/end x thresholds x binary logits),
        and train_inds indexing the boxes kept for training (None unless both
        predicted and GT boxes were supplied).
        """
        train_inds = None
        # Training with detections: mix GT boxes with well-matched predictions.
        if (gt_cell_boxes is not None) and (pred_cell_boxes is not None):
            cell_boxes, train_inds = self.filter_box(pred_cell_boxes, gt_cell_boxes)
        # GT-only: pad images with fewer than 2 boxes so a graph can be built.
        elif (pred_cell_boxes is None) and (gt_cell_boxes is not None):
            cell_boxes = []
            for img_boxes in gt_cell_boxes:
                num_node = img_boxes.size(0)
                if num_node < 2:
                    cell_boxes.append(torch.cat((img_boxes, torch.as_tensor([[0,0,0,0]]*(2-num_node)).to(torch.float32).to(img_boxes.device)),0))
                else:
                    cell_boxes.append(img_boxes)
        # Inference: predicted boxes only, padded the same way.
        elif (gt_cell_boxes is None) and (pred_cell_boxes is not None):
            cell_boxes = []
            for img_boxes in pred_cell_boxes:
                num_node = img_boxes.size(0)
                if num_node < 2:
                    cell_boxes.append(torch.cat((img_boxes, torch.as_tensor([[0,0,0,0]]*(2-num_node)).to(torch.float32).to(img_boxes.device)),0))
                else:
                    cell_boxes.append(img_boxes)
        box_feat = self.get_box_feat(cell_boxes)
        box_feat = self.box_emb(box_feat).to(self.device)
        decode_feat = self.decode_out(input)
        bbox_count = [i.shape[0] for i in cell_boxes]
        cnn_feat = ops.roi_align(decode_feat, cell_boxes, 2) #[num_node, 256, 2, 2]
        cnn_feat = self.cnn_emb(cnn_feat.view(cnn_feat.size(0), -1))
        graphs = self.build_graph(cell_boxes, im_scales, pdls, pdts)
        fusion_feat = torch.cat([box_feat, cnn_feat], dim=1)
        # Two GCN branches share the fused features but use different edge weights.
        row_feat = self.gconv_row(fusion_feat, graphs.edge_index, graphs.row_attr)
        row_feat = F.relu(row_feat)
        col_feat = self.gconv_col(fusion_feat, graphs.edge_index, graphs.col_attr)
        col_feat = F.relu(col_feat)
        cls_row_score = self.row_cls(row_feat)
        cls_col_score = self.col_cls(col_feat)
        #cls_row_score = torch.reshape(cls_row_score, (cls_row_score.size(0), self.rows_classes, 2))
        #cls_col_score = torch.reshape(cls_col_score, (cls_col_score.size(0), self.cols_classes, 2))
        cls_row_score = torch.reshape(cls_row_score, (cls_row_score.size(0), 2, self.rows_classes-1, 2))
        cls_col_score = torch.reshape(cls_col_score, (cls_col_score.size(0), 2, self.cols_classes-1, 2))
        return cls_row_score, cls_col_score, train_inds
| [
"15120452@bjtu.edu"
] | 15120452@bjtu.edu |
14117448fe850d69ae5fcf1bd41049c19247b557 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /appmesh_write_2/virtual-router_delete.py | db6df7702ffc69ca7d3bbf5c3eda2b1680913ce2 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,343 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appmesh/delete-virtual-router.html
if __name__ == '__main__':
    # Related appmesh virtual-router commands, kept for reference:
    """
    create-virtual-router : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appmesh/create-virtual-router.html
    describe-virtual-router : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appmesh/describe-virtual-router.html
    list-virtual-routers : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appmesh/list-virtual-routers.html
    update-virtual-router : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/appmesh/update-virtual-router.html
    """
    # Help text shown to the user describing the two required parameters.
    parameter_display_string = """
    # mesh-name : The name of the service mesh to delete the virtual router in.
    # virtual-router-name : The name of the virtual router to delete.
    """
    add_option_dict = {}
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    # Prompt for mesh-name and virtual-router-name, then run
    # `aws appmesh delete-virtual-router` with the collected values.
    write_two_parameter("appmesh", "delete-virtual-router", "mesh-name", "virtual-router-name", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
3a5e3c2e15f0e9ec3f7c4f139ac435cef942c429 | 84b08a60e49e702e51b8c3bd0c558fbd957e11ae | /LatestAlgorithms/SVM.py | 28692dc75e49da2349af66a00b5e9ad5073e963c | [] | no_license | akhalayly/GoldenBoy | 787732656250bc52ad0076dca35f15abbd2f4f14 | fb88b656525c3bc614a24b982acf4d1ae745aa8b | refs/heads/main | 2023-02-06T02:17:53.197336 | 2020-12-28T20:07:14 | 2020-12-28T20:07:14 | 304,894,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,205 | py | from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import KFold
from sklearn.feature_selection import SelectKBest, chi2
from sklearn import svm
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import Positions_Traits as posT
import helperFunctions as hf
if __name__ == '__main__':
    # One "Success_<position>.csv" file per position group; each group is
    # evaluated independently with SVMs over four kernels and six C values.
    files = ["CAMS", "CBs", "CMs", "CDMs", "GKs", "LBs", "LMs", "RBs", "RMs",
             "Strikers"]
    for file in files:
        dataset = pd.read_csv("Success_" + file + ".csv")
        # Collect feature column indices: age, general info, universally
        # positive traits, and the traits mapped to this position's roles.
        attrbs = []
        attrbs = attrbs + hf.roleTraitIndexesFinder(["Age"], dataset.columns, hf.year_2012)
        attrbs = attrbs + hf.roleTraitIndexesFinder(posT.General_Info, dataset.columns, "")
        attrbs = attrbs + hf.roleTraitIndexesFinder(posT.Positive_Traits, dataset.columns, hf.year_2012)
        for role in posT.positionToTraits[file]:
            attrbs = attrbs + hf.roleTraitIndexesFinder(role, dataset.columns, hf.year_2012)
        # De-duplicate indices (the trait groups can overlap).
        attrbs = list(set(attrbs))
        X = dataset.iloc[:, attrbs].values.astype(float)
        # Last CSV column is taken as the (binary) success label.
        y = dataset.iloc[:, -1].values
        # Project-specific normalisation; presumably CA is column 1, market
        # value the last column, and age column 0 — confirm in helperFunctions.
        X = hf.normalizeAge(hf.normalizeMarketValue(hf.normalizeCA(X, 1), -1, file), 0)
        # X = SelectKBest(chi2, k=10).fit_transform(X, y)
        kf = KFold(n_splits=5)
        # Materialise the folds once so every kernel/C pair sees identical splits.
        splits = []
        kernel_results = {
            'linear': [],
            'poly': [],
            'rbf': [],
            'sigmoid': []
        }
        for train, test in kf.split(X):
            splits.append((train, test))
        for kernel in ['linear', 'poly', 'rbf', 'sigmoid']:
            # Mean cross-validated accuracy per C candidate (6 entries, one
            # per value in the C list below).
            c_kernel_results = [0] * 6
            index = 0
            for c in [0.01, 0.1, 0.5, 1, 2, 5]:
                for train_index, test_index in splits:
                    X_train, X_test = X[train_index], X[test_index]
                    y_train, y_test = y[train_index], y[test_index]
                    clf = svm.SVC(kernel=kernel, C=c)
                    clf.fit(X_train, y_train)
                    pred_i = clf.predict(X_test)
                    # NOTE(review): choseOne/choseZero are reset each fold and
                    # only used by the commented-out diagnostics below.
                    choseOne = 0
                    choseZero = 0
                    # Fold accuracy (1 - error rate), averaged over the folds.
                    c_kernel_results[index] += ((1 - np.mean(pred_i != y_test)) / splits.__len__())
                index += 1
                # NOTE(review): re-assigned on every C iteration; harmless since
                # the same list object is stored, but one assignment after the
                # C loop would suffice.
                kernel_results[kernel] = c_kernel_results
                # print("Timo prediction is: " + str(clf.predict([X[111]])[0]))
                # for i in range(len(pred_i)):
                #     if pred_i[i] != y_test[i] and pred_i[i] == 1:
                #         choseOne = choseOne + 1
                #     elif pred_i[i] != y_test[i]:
                #         choseZero = choseZero + 1
                # print("choseZero: " + str(len(pred_i) - sum(pred_i)) + " choseZero: " + str(
                #     choseZero) + " Ratio of wrong Zeros: " + str(choseZero / (len(pred_i) - sum(pred_i))))
                # print("choseOne: " + str(sum(pred_i)) + " choseOneWrong: " + str(
                #     choseOne) + " Ratio of wrong Ones: " + str(choseOne / sum(pred_i)))
            print(kernel + " Last Accuracy is: " + str(kernel_results[kernel]))
        # Plot mean accuracy vs C for all four kernels for this position file.
        plt.figure(figsize=(12, 6))
        plt.plot([0.01, 0.1, 0.5, 1, 2, 5], kernel_results['linear'], color='red', marker='o',
                 markerfacecolor='red', markersize=10)
        plt.plot([0.01, 0.1, 0.5, 1, 2, 5], kernel_results['poly'], color='blue', marker='o',
                 markerfacecolor='blue', markersize=10)
        plt.plot([0.01, 0.1, 0.5, 1, 2, 5], kernel_results['rbf'], color='black', marker='o',
                 markerfacecolor='black', markersize=10)
        plt.plot([0.01, 0.1, 0.5, 1, 2, 5], kernel_results['sigmoid'], color='brown', marker='o',
                 markerfacecolor='brown', markersize=10)
        plt.title('Accuracy Rate SVM ' + file)
        plt.xlabel('C Value')
        plt.ylabel('Mean Accuracy')
        plt.legend([str(i) for i in kernel_results.keys()])
        plt.show()
| [
"noreply@github.com"
] | akhalayly.noreply@github.com |
53fa6c563e9983afb729af1af3be08c9c03dd4a1 | 8792e3449fbc6c8dec99f6af1d9f1b4caddad1f7 | /51player.py | 470f81860462904d56f98294142a2c26cd476828 | [] | no_license | aarthisandhiya/aarthisandhiya1 | c19c1951c9ba01cd97eeddd44614953088718357 | e6f10247b6a84d6eaf371a23f2f9c3bebbc73e5b | refs/heads/master | 2020-04-15T17:17:07.151242 | 2019-05-20T05:24:19 | 2019-05-20T05:24:19 | 164,868,494 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | a=int(input())
s=[int(a) for a in input().split()]
s=list(s)
z=[]
for i in range(0,len(s)):
val=s[i]
i=i-1
while i>=0:
if val<s[i]:
s[i+1]=s[i]
s[i]=val
i=i-1
else:
break
print(s[1])
| [
"noreply@github.com"
] | aarthisandhiya.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.