repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
liuxb555/earthengine-py-examples | MachineLearning/clustering.py | <filename>MachineLearning/clustering.py
import ee
import geemap

# Create an interactive map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)

# Load a pre-computed Landsat composite for input.
# Renamed from `input` so the Python builtin is not shadowed.
image = ee.Image('LANDSAT/LE7_TOA_1YEAR/2001')

# Define a region in which to generate a sample of the input.
region = ee.Geometry.Rectangle(29.7, 30, 32.5, 31.7)

# Display the sample region.
Map.setCenter(31.5, 31.0, 8)
Map.addLayer(ee.Image().paint(region, 0, 2), {}, 'region')

# Make the training dataset by sampling pixels within the region.
training = image.sample(**{
    'region': region,
    'scale': 30,
    'numPixels': 5000
})

# Instantiate the k-means clusterer (15 clusters) and train it.
clusterer = ee.Clusterer.wekaKMeans(15).train(training)

# Cluster the input using the trained clusterer.
result = image.cluster(clusterer)

# Display the clusters with random colors.
Map.addLayer(result.randomVisualizer(), {}, 'clusters')

# Display the map.
Map
|
liuxb555/earthengine-py-examples | Image/texture.py | <filename>Image/texture.py<gh_stars>10-100
import ee
import geemap
import math  # hoisted to the top with the other imports

# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)

# Load a high-resolution NAIP image.
image = ee.Image('USDA/NAIP/DOQQ/m_3712213_sw_10_1_20140613')

# Zoom to San Francisco, display.
Map.setCenter(-122.466123, 37.769833, 17)
Map.addLayer(image, {'max': 255}, 'image')

# Get the NIR band.
nir = image.select('N')

# Define a neighborhood with a kernel.
square = ee.Kernel.square(**{'radius': 4})

# Compute entropy and display.
entropy = nir.entropy(square)
Map.addLayer(entropy,
             {'min': 1, 'max': 5, 'palette': ['0000CC', 'CC0000']},
             'entropy')

# Compute the gray-level co-occurrence matrix (GLCM), get contrast.
glcm = nir.glcmTexture(**{'size': 4})
contrast = glcm.select('N_contrast')
Map.addLayer(contrast,
             {'min': 0, 'max': 1500, 'palette': ['0000CC', 'CC0000']},
             'contrast')

# Create a list of weights for a 9x9 kernel.
# Renamed from `list` so the Python builtin is not shadowed.
row_weights = [1, 1, 1, 1, 1, 1, 1, 1, 1]
# The center of the kernel is zero.
center_row = [1, 1, 1, 1, 0, 1, 1, 1, 1]
# Assemble a list of lists: the 9x9 kernel weights as a 2-D matrix.
weights = [row_weights, row_weights, row_weights, row_weights, center_row,
           row_weights, row_weights, row_weights, row_weights]
# Create the kernel from the weights.
# Non-zero weights represent the spatial neighborhood.
kernel = ee.Kernel.fixed(9, 9, weights, -4, -4, False)

# Convert the neighborhood into multiple bands.
neighs = nir.neighborhoodToBands(kernel)

# Compute local Geary's C, a measure of spatial association.
gearys = nir.subtract(neighs).pow(2).reduce(ee.Reducer.sum()) \
    .divide(math.pow(9, 2))
Map.addLayer(gearys,
             {'min': 20, 'max': 2500, 'palette': ['0000CC', 'CC0000']},
             "Geary's C")

# Display the map.
Map
|
liuxb555/earthengine-py-examples | ImageCollection/metadata.py | import ee
import geemap

# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)

# Load a Landsat 8 ImageCollection for a single path-row.
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
    .filter(ee.Filter.eq('WRS_PATH', 44)) \
    .filter(ee.Filter.eq('WRS_ROW', 34)) \
    .filterDate('2014-03-01', '2014-08-01')
print('Collection: ', collection.getInfo())

# Get the number of images.
count = collection.size()
print('Count: ', count.getInfo())

# Get the date range of images in the collection.
# Renamed from `range` so the Python builtin is not shadowed.
date_range = collection.reduceColumns(ee.Reducer.minMax(), ["system:time_start"])
print('Date range: ', ee.Date(date_range.get('min')).getInfo(),
      ee.Date(date_range.get('max')).getInfo())

# Get statistics for a property of the images in the collection.
sunStats = collection.aggregate_stats('SUN_ELEVATION')
print('Sun elevation statistics: ', sunStats.getInfo())

# Sort by a cloud cover property, get the least cloudy image.
image = ee.Image(collection.sort('CLOUD_COVER').first())
print('Least cloudy image: ', image.getInfo())

# Limit the collection to the 10 most recent images.
recent = collection.sort('system:time_start', False).limit(10)
print('Recent images: ', recent.getInfo())

# Display the map.
Map
|
liuxb555/earthengine-py-examples | Gena/landsat_median.py | <reponame>liuxb555/earthengine-py-examples<gh_stars>10-100
import ee
import geemap

# Interactive map centered on the continental US.
Map = geemap.Map(center=[40, -100], zoom=4)

# Landsat 8 TOA scenes for one WRS-2 path/row over the year 2014.
collection = (ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA')
              .filter(ee.Filter.eq('WRS_PATH', 44))
              .filter(ee.Filter.eq('WRS_ROW', 34))
              .filterDate('2014-01-01', '2015-01-01'))

# Per-pixel median composite of the filtered scenes.
median = collection.median()

# Show a true-color rendering over San Francisco.
vis_params = {"bands": ['B4', 'B3', 'B2'], "max": 0.3}
Map.setCenter(-122.3578, 37.7726, 12)
Map.addLayer(median, vis_params, 'median')

# Display the map.
Map
|
liuxb555/earthengine-py-examples | GetStarted/07_image_statistics.py | import ee
import geemap

# Interactive map centered on the continental US.
Map = geemap.Map(center=[40, -100], zoom=4)

# A single Landsat 8 TOA scene, shown in true color.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318')
Map.addLayer(image, {'bands': ['B4', 'B3', 'B2'], 'max': 0.3}, 'Landsat 8')

# An arbitrary rectangle to use as the statistics region.
region = ee.Geometry.Rectangle(-122.2806, 37.1209, -122.0554, 37.2413)
Map.centerObject(ee.FeatureCollection(region), 13)
Map.addLayer(ee.Image().paint(region, 0, 2), {}, 'Region')

# Per-band mean over the region; the result maps band name -> mean value.
mean = image.reduceRegion(**{
    'reducer': ee.Reducer.mean(),
    'geometry': region,
    'scale': 30
})
print(mean.getInfo())

# Display the map.
Map
|
liuxb555/earthengine-py-examples | FeatureCollection/extract_image_by_polygon.py | import ee
import geemap

# Interactive map centered on the continental US.
Map = geemap.Map(center=[40, -100], zoom=4)


def extract_landsat(feature):
    """Return a 2019 median Landsat 8 SR composite clipped to the feature."""
    geom = ee.Feature(feature).geometry()
    composite = (ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
                 .filterDate('2019-01-01', '2019-12-31')
                 .filterBounds(geom)
                 .median()
                 .clip(geom))
    return composite


# Select the first five U.S. counties.
counties = ee.FeatureCollection('TIGER/2018/Counties').toList(5)
Map.setCenter(-110, 40, 5)
Map.addLayer(ee.Image().paint(counties, 0, 2), {'palette': 'red'}, "Selected Counties")

# Extract a Landsat composite for each county.
images = counties.map(extract_landsat)

# Add each county composite to the map (same vis params for every layer).
vis = {'bands': ['B5', 'B4', 'B3'], 'min': 0, 'max': 3000, 'gamma': 1.4}
for idx in range(5):
    Map.addLayer(ee.Image(images.get(idx)), vis, 'Image ' + str(idx + 1))

# Display the map.
Map
|
liuxb555/earthengine-py-examples | Datasets/sentinel-2.py | import ee
import geemap

# Interactive map centered on the continental US.
Map = geemap.Map(center=[40, -100], zoom=4)

# A single Sentinel-2 surface-reflectance tile.
img = ee.Image("COPERNICUS/S2_SR/20191115T074201_20191115T075706_T37MBM")

# NDVI from the NIR (B8) and red (B4) bands.
ndvi = img.normalizedDifference(['B8', 'B4'])

# Red-to-green palette for low-to-high NDVI.
pal = ["red", "yellow", "green"]
Map.setCenter(36.9, -7.7, 12)
Map.addLayer(ndvi, {'min': 0, 'max': 0.8, 'palette': pal}, 'NDVI')

# Display the map.
Map
|
lotusxai/join-face-detection-project | src/train.py | import numpy as np
import pandas as pd
import predict  # sibling module; provides main(), invoked below
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
import time
from datetime import datetime
import warnings
import os
warnings.filterwarnings('ignore')
# ML libraries
import lightgbm as lgb
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
# Shared label encoder instance (not used in this module yet).
le = preprocessing.LabelEncoder()
if __name__ == "__main__":
    # Training currently just delegates to the prediction entry point.
    # NOTE(review): predict.py also imports this module -- a circular
    # import; confirm it is intentional before extending either module.
    predict.main()
|
lotusxai/join-face-detection-project | src/predict.py | import train
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
import time
from datetime import datetime
import warnings
warnings.filterwarnings('ignore')
# ML libraries
import lightgbm as lgb
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn import linear_model
from sklearn.metrics import mean_squared_error

# Shared label encoder instance (not used in this module yet).
le = preprocessing.LabelEncoder()


def main():
    """Load the training and test CSVs configured via environment variables.

    Reads the TRAINING_DATA and TEST_DATA environment variables and loads
    both CSV files. Fails fast with a clear message when either variable
    is unset, instead of letting ``pd.read_csv(None)`` raise an opaque
    error.

    Returns
    -------
    tuple of (pandas.DataFrame, pandas.DataFrame)
        The training and test dataframes.
    """
    training_path = os.environ.get("TRAINING_DATA")
    test_path = os.environ.get("TEST_DATA")
    if not training_path or not test_path:
        raise RuntimeError(
            "TRAINING_DATA and TEST_DATA environment variables must be set")
    train_data = pd.read_csv(training_path)
    test = pd.read_csv(test_path)
    return train_data, test


if __name__ == "__main__":
    main()
|
lotusxai/join-face-detection-project | src/dataset.py | <gh_stars>1-10
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
import time
from datetime import datetime
import warnings
warnings.filterwarnings('ignore')
import os
# ML libraries
import lightgbm as lgb
import xgboost as xgb
import librosa.display
# Paths to the training/test CSVs come from environment variables.
# NOTE(review): pd.read_csv will fail if either variable is unset --
# confirm both are always exported before this script runs.
TRAINING_DATA_DIR = os.environ.get("TRAINING_DATA")
TEST_DATA = os.environ.get("TEST_DATA")
train_data = pd.read_csv(TRAINING_DATA_DIR)
test = pd.read_csv(TEST_DATA)
# Quick sanity check of the loaded training data.
print(train_data.head(5))
print(train_data.describe())
|
lotusxai/join-face-detection-project | src/preprocessing.py | <filename>src/preprocessing.py
import numpy as np
import pandas as pd
import predict  # sibling module (imported for its side effects / reuse)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
import time
from datetime import datetime
import warnings
import os
warnings.filterwarnings('ignore')
# ML libraries
import lightgbm as lgb
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
# Shared label encoder instance; no preprocessing logic is defined yet.
le = preprocessing.LabelEncoder()
|
lotusxai/join-face-detection-project | src/logistic_regression.py | <reponame>lotusxai/join-face-detection-project
# New split function, for one forecast day
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
import time
|
steppi/gilda | gilda/tests/test_term.py | <filename>gilda/tests/test_term.py<gh_stars>10-100
from gilda.term import Term, get_identifiers_url
def test_standalone_get_url():
    """get_identifiers_url builds the expected identifiers.org links."""
    cases = [
        (('UP', 'P12345'), 'https://identifiers.org/uniprot:P12345'),
        (('HGNC', '12345'), 'https://identifiers.org/hgnc:12345'),
        (('CHEBI', 'CHEBI:12345'), 'https://identifiers.org/CHEBI:12345'),
    ]
    for (db, db_id), expected in cases:
        assert get_identifiers_url(db, db_id) == expected
def test_term_get_url():
    """A Term resolves to its identifiers.org URL.

    NOTE(review): ``get_idenfiers_url`` (sic) is the method name as it
    is actually spelled on gilda's Term class.
    """
    term = Term(db='CHEBI', id='CHEBI:12345', entry_name='X',
                norm_text='x', text='X', source='test', status='name')
    assert term.get_idenfiers_url() == \
        'https://identifiers.org/CHEBI:12345'
|
steppi/gilda | benchmarks/bioid_evaluation.py | <reponame>steppi/gilda
"""This script benchmarks Gilda on the BioCreative VII BioID corpus.
It dumps multiple result tables in the results folder."""
import json
import os
from collections import defaultdict
from copy import deepcopy
from datetime import datetime
from functools import lru_cache
from textwrap import dedent
from typing import Any, Collection, Dict, Iterable, List, Optional, Set, Tuple
import click
import pandas as pd
import pystow
import tabulate
from lxml import etree
from tqdm import tqdm
import famplex
from gilda.grounder import Grounder, logger
from gilda.resources import mesh_to_taxonomy, popular_organisms
from indra.databases.chebi_client import get_chebi_id_from_pubchem
from indra.databases.hgnc_client import get_hgnc_from_entrez
from indra.databases.uniprot_client import get_hgnc_id
from indra.literature import pmc_client, pubmed_client
from indra.ontology.bio import bio_ontology
logger.setLevel('WARNING')
HERE = os.path.dirname(os.path.abspath(__file__))
TAXONOMY_CACHE_PATH = os.path.join(HERE, 'taxonomy_cache.json')
MODULE = pystow.module('gilda', 'biocreative')
URL = 'https://biocreative.bioinformatics.udel.edu/media/store/files/2017/BioIDtraining_2.tar.gz'
tqdm.pandas()
#: A set of the prefix->prefix mappings missing from the bio-ontology
BO_MISSING_XREFS = set()
class BioIDBenchmarker:
"""Used for evaluating gilda using data from BioCreative VI BioID track
Parameters
----------
grounder :
Grounder object to use in evaluation. If None, instantiates a grounder
with default arguments. Default: None
equivalences :
Dictionary of mappings between namespaces. Maps strings of the form
f'{namespace}:{id}' to strings for equivalent groundings. This is
used to map groundings from namespaces used the the BioID track
(e.g. Uberon, Cell Ontology, Cellosaurus, NCBI Taxonomy) that are not
available by default in Gilda. Default: None
"""
def __init__(
    self,
    *,
    grounder: Optional[Grounder] = None,
    equivalences: Optional[Dict[str, Any]] = None,
):
    """Set up the grounder, equivalence map, and processed annotations."""
    print("using tabulate", tabulate.__version__)
    print("Instantiating benchmarker...")
    if grounder is None:
        grounder = Grounder()
    print("Instantiating bio ontology...")
    bio_ontology.initialize()
    if equivalences is None:
        equivalences = {}
    # Collect the set of namespaces Gilda can ground to, derived from the
    # grounder's term entries.
    available_namespaces = set()
    for terms in grounder.entries.values():
        for term in terms:
            available_namespaces.add(term.db)
    self.grounder = grounder
    self.equivalences = equivalences
    self.available_namespaces = list(available_namespaces)
    # Maps (don_article, text) pairs to all acceptable groundings seen
    # anywhere in that paper; filled in by _process_annotations_table.
    self.paper_level_grounding = defaultdict(set)
    self.processed_data = self._process_annotations_table()
    # Per-article organism priority lists, persisted on disk so repeated
    # runs avoid redundant taxonomy lookups.
    if os.path.exists(TAXONOMY_CACHE_PATH):
        with open(TAXONOMY_CACHE_PATH, 'r') as fh:
            self.taxonomy_cache = json.load(fh)
    else:
        self.taxonomy_cache = {}
    print('Taxonomy cache length: %s' % len(self.taxonomy_cache))
def get_mappings_tables(self) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Get table showing how goldstandard groundings are being mapped

    Namespaces used in the Bioc dataset may only partially overlap with
    those used by Gilda. Users may pass in a dictionary of equivalences
    mapping groundings used in the Bioc dataset to Gilda's namespaces.
    This method generates tables showing how groundings used in the
    dataset project onto Gilda's namespaces through these equivalences.

    Returns
    -------
    mapping_table : py:class`pandas.DataFrame`
        Rows correspond to namespaces used in the Bioc dataset, columns
        to namespaces used in Gilda (automatically populated based on a
        Gilda Grounder's entries attribute). There is also a row Total
        containing the sum of values for all other rows. There are columns
        Count, and Total Mapped, showing the total count of entries for
        each row namespace, and the total count of entries that could be
        mapped to a Gilda namespace respectively.

        The same row namespace can be mapped to multiple column namespaces,
        causing values in the Total Mapped column to be less than the sum
        of values of other columns in the same row. Additionally, in some
        cases an entry in the Bioc dataset has multiple curated groundings,
        causing the counts not to add up to the number of entries in the
        dataset.
    mapping_table_unique : py:class`pandas.DataFrame`
        Similar to mapping table, but counts are given for unique named
        entity groundings, ignoring duplication of groundings between rows
        in the Bioc dataset.
    """
    # Build dataframes for storing information. Values will be filled in
    # by looping through rows of the dataset.
    index = [get_display_name(ns) for ns in bioc_nmspaces] + ['Total']
    columns = (['Count'] +
               [get_display_name(ns) for ns in self.available_namespaces] +
               ['Total Mapped'])
    mapping_table = pd.DataFrame(index=index, columns=columns)
    mapping_table.fillna(0, inplace=True)
    mapping_table_unique = pd.DataFrame(index=index, columns=columns)
    mapping_table_unique.fillna(0, inplace=True)

    # Maps row namespaces to sets of associated grounding ids
    nmspace_ids = defaultdict(set)
    # Maps row namespaces to the set of Gilda grounding ids that have
    # been mapped to from them
    mapped_to_nmspace_ids = defaultdict(set)
    # Maps row namespaces to sets of associated grounding ids, but
    # only in cases where some mapping exists to a Gilda grounding
    mapped_from_nmspace_ids = defaultdict(set)

    # Looping through dataframe is costly. There may be a way to write
    # this with a clever series of groupbys
    for _, row in self.processed_data.iterrows():
        # For each row loop through goldstandard groundings. There can
        # be more than one
        for g1 in row.obj:
            # Get the namespace. If it is not one of the namespaces used
            # in evaluation, discard and continue to the next iteration
            # of the loop
            nmspace1 = g1.split(':', maxsplit=1)[0]
            if nmspace1 not in bioc_nmspaces:
                continue
            # Increment total count for this namespace
            mapping_table.loc[get_display_name(nmspace1), 'Count'] += 1
            # If this particular grounding has not been seen before for
            # this namespace increment unique count and mark grounding
            # as having been seen
            if g1 not in nmspace_ids[nmspace1]:
                mapping_table_unique.loc[get_display_name(nmspace1),
                                         'Count'] += 1
                nmspace_ids[nmspace1].add(g1)
            # Get all of the synonyms that grounding can be mapped to.
            # This includes the grounding itself. If a row namespace is
            # also a column namespace, we consider this to be a valid
            # mapping
            synonyms = self.get_synonym_set([g1])
            # Track which namespaces have been used so we don't overcount
            # when the same grounding can be mapped to multiple groundings
            # in the same namespace
            used_namespaces = set()
            for g2 in synonyms:
                nmspace2 = g2.split(':', maxsplit=1)[0]
                # If a namespace mapped to is not available in Gilda
                # or if we have already tallied a mapping to this namespace
                # for this particular row, discard and continue
                if nmspace2 not in self.available_namespaces or \
                        nmspace2 in used_namespaces:
                    continue
                # If Gilda namespace has not been mapped to in the current
                # row increment the count of entries in the namespace with
                # a mapping to a Gilda namespace
                if not used_namespaces:
                    mapping_table.loc[get_display_name(nmspace1),
                                      'Total Mapped'] += 1
                used_namespaces.add(nmspace2)
                # If the grounding g1 has never been mapped to a Gilda
                # namespace increment the unique count
                if g1 not in mapped_from_nmspace_ids[nmspace1]:
                    mapping_table_unique. \
                        loc[get_display_name(nmspace1),
                            'Total Mapped'] += 1
                    mapped_from_nmspace_ids[nmspace1].add(g1)
                # Increment count for mapping of row namespace to
                # column namespace
                mapping_table.loc[get_display_name(nmspace1),
                                  get_display_name(nmspace2)] += 1
                # If the grounding in column namespace has not been mapped
                # to by the grounding in row namespace, increment unique
                # count
                if g2 not in mapped_to_nmspace_ids[nmspace1]:
                    mapping_table_unique. \
                        loc[get_display_name(nmspace1),
                            get_display_name(nmspace2)] += 1
                    mapped_to_nmspace_ids[nmspace1].add(g2)

    # Generate total rows
    mapping_table.loc['Total', :] = mapping_table.sum()
    mapping_table_unique.loc['Total', :] = mapping_table_unique.sum()
    mapping_table.reset_index(inplace=True)
    # Fix: rename must target the columns axis; with a bare mapper,
    # DataFrame.rename applies to the index, so the 'index' column
    # produced by reset_index was never renamed to 'Namespace'.
    mapping_table.rename(columns={'index': 'Namespace'}, inplace=True)
    mapping_table_unique.reset_index(inplace=True)
    mapping_table_unique.rename(columns={'index': 'Namespace'}, inplace=True)
    return mapping_table, mapping_table_unique
def _process_annotations_table(self):
    """Extract relevant information from annotations table."""
    print("Extracting information from annotations table...")
    # Download (if needed) and load the BioID annotations CSV via pystow.
    df = MODULE.ensure_tar_df(
        url=URL,
        inner_path='BioIDtraining_2/annotations.csv',
        read_csv_kwargs=dict(sep=',', low_memory=False),
    )
    # Split entries with multiple groundings then normalize ids
    df.loc[:, 'obj'] = df['obj'].apply(self._normalize_ids)
    # Add synonyms of gold standard groundings to help match more things
    df.loc[:, 'obj_synonyms'] = df['obj'].apply(self.get_synonym_set)
    # Create column for entity type
    df.loc[:, 'entity_type'] = df.apply(self._get_entity_type_helper, axis=1)
    processed_data = df[['text', 'obj', 'obj_synonyms', 'entity_type',
                         'don_article']]
    print("%d rows in processed annotations table." % len(processed_data))
    # Drop rows whose curated groundings did not map to a known entity type.
    processed_data = processed_data[processed_data.entity_type
                                    != 'unknown']
    print("%d rows in annotations table without unknowns." %
          len(processed_data))
    # Record, per (paper, text) pair, every acceptable grounding seen
    # anywhere in that paper; used later for paper-level evaluation.
    for don_article, text, synonyms in df[['don_article', 'text',
                                           'obj_synonyms']].values:
        self.paper_level_grounding[don_article, text].update(synonyms)
    return processed_data
def _get_entity_type_helper(self, row) -> str:
    """Refine 'Gene' rows into human vs. non-human using HGNC synonyms."""
    entity_type = self._get_entity_type(row.obj)
    if entity_type != 'Gene':
        return entity_type
    has_hgnc = any(syn.startswith('HGNC') for syn in row.obj_synonyms)
    return 'Human Gene' if has_hgnc else 'Nonhuman Gene'
def ground_entities_with_gilda(self, context=True):
    """Compute gilda groundings of entity texts in corpus

    Adds two columns to the internal dataframe for groundings with
    and without context based disambiguation.
    """
    df = self.processed_data
    tqdm.write("Grounding no-context corpus with Gilda...")
    df.loc[:, 'groundings_no_context'] = df.text. \
        progress_apply(self._get_grounding_list)
    if context:
        tqdm.write("Grounding with-context corpus with Gilda...")
        # use from tqdm.contrib.concurrent import thread_map
        df.loc[:, 'groundings'] = df. \
            progress_apply(self._get_row_grounding_list, axis=1)
    else:
        tqdm.write("Skipping grounding with context.")
        # Without context, reuse the no-context results as-is.
        df.loc[:, 'groundings'] = df.groundings_no_context
    tqdm.write("Finished grounding corpus with Gilda...")
    # Immediately score the groundings against the gold standard.
    self._evaluate_gilda_performance()
def _get_row_grounding_list(self, row):
    """Ground one row's text using its paper's full text and organisms."""
    article = row.don_article
    return self._get_grounding_list(
        row.text,
        context=self._get_plaintext(article),
        organisms=self._get_organism_priority(article),
    )
@lru_cache(maxsize=None)
def _get_plaintext(self, don_article: str) -> str:
    """Get plaintext content from XML file in BioID corpus

    Parameters
    ----------
    don_article :
        Identifier for paper used within corpus.

    Returns
    -------
    :
        Plaintext of specified article
    """
    directory = MODULE.ensure_untar(url=URL, directory='BioIDtraining_2')
    path = directory.joinpath('BioIDtraining_2', 'fulltext_bioc',
                              f'{don_article}.xml')
    tree = etree.parse(path.as_posix())
    paragraphs = tree.xpath('//text')
    paragraphs = [' '.join(text.itertext()) for text in paragraphs]
    # Fix: join paragraphs with real newlines; the original used the
    # literal two-character string '/n', which is not a newline escape
    # and corrupted the context text passed to the grounder.
    return '\n'.join(paragraphs) + '\n'
def _get_organism_priority(self, don_article):
    """Return popular organisms ordered by relevance to the given paper."""
    key = str(don_article)
    if key in self.taxonomy_cache:
        return self.taxonomy_cache[key]
    pubmed_id = pubmed_from_pmc(key)
    taxonomy_ids = get_taxonomy_for_pmid(pubmed_id)
    # Organisms mentioned in the paper come first; the rest keep their
    # default priority order.
    mentioned = [org for org in popular_organisms if org in taxonomy_ids]
    unmentioned = [org for org in popular_organisms
                   if org not in taxonomy_ids]
    organisms = mentioned + unmentioned
    self.taxonomy_cache[key] = organisms
    return organisms
@classmethod
def _normalize_ids(cls, curies: str) -> List[str]:
    """Split a pipe-delimited CURIE string and normalize each entry."""
    return [cls._normalize_id(curie) for curie in curies.split('|')]
@staticmethod
def _normalize_id(curie):
    """Convert ID into standardized format, f'{namespace}:{id}'."""
    # Cellosaurus IDs look like CVCL_xxxx; convert to CURIE style.
    if curie.startswith('CVCL'):
        return curie.replace('_', ':')
    parts = curie.split(':', maxsplit=1)
    prefix = parts[0]
    if prefix == 'Uberon':
        # Uberon groundings are kept as bare identifiers.
        return parts[1]
    if prefix == 'Uniprot':
        return f'UP:{parts[1]}'
    if prefix in ('GO', 'CHEBI'):
        # Gilda uses "banana"-style CURIEs for GO and CHEBI.
        return f'{prefix}:{prefix}:{parts[1]}'
    return curie
@staticmethod
def _get_entity_type(groundings: Collection[str]) -> str:
    """Get entity type based on entity groundings of text in corpus."""
    # Ordered (prefixes, label) rules; the first label whose prefixes
    # match any grounding wins, mirroring the original if/elif chain.
    rules = [
        (('NCBI gene', 'UP'), 'Gene'),
        (('Rfam',), 'miRNA'),
        (('CHEBI', 'PubChem'), 'Small Molecule'),
        (('GO',), 'Cellular Component'),
        (('CVCL', 'CL'), 'Cell types/Cell lines'),
        (('UBERON',), 'Tissue/Organ'),
        (('NCBI taxon',), 'Taxon'),
    ]
    for prefixes, label in rules:
        if any(grounding.startswith(prefixes) for grounding in groundings):
            return label
    return 'unknown'
def _get_grounding_list(
    self,
    text: str,
    context=None,
    organisms=None,
) -> List[Tuple[str, float]]:
    """Run gilda on a text and extract list of result-score tuples."""
    scored_matches = self.grounder.ground(text, context=context,
                                          organisms=organisms)
    return [
        (f'{match.term.db}:{match.term.id}', match.score)
        for match in scored_matches
    ]
def get_synonym_set(self, curies: Iterable[str]) -> Set[str]:
    """Return set containing all elements in input list along with synonyms
    """
    output = set()
    for curie in curies:
        output.update(self._get_equivalent_entities(curie))
    # We accept all FamPlex terms that cover some or all of the specific
    # entries in the annotations.
    # NOTE(review): fplx_members appears to be a module-level mapping of
    # FamPlex entry -> member set defined elsewhere in this file.
    covered_fplx = {fplx_entry for fplx_entry, members
                    in fplx_members.items() if (members <= output)}
    output |= {'FPLX:%s' % fplx_entry for fplx_entry in covered_fplx}
    return output
def _get_equivalent_entities(self, curie: str) -> Set[str]:
    """Return set of equivalent entity groundings

    Uses the equivalences passed in at construction (self.equivalences)
    as well as those available in indra's hgnc, uniprot, and chebi
    clients.
    """
    output = {curie}
    prefix, identifier = curie.split(':', maxsplit=1)
    # Cross-references known to the INDRA bio-ontology.
    for xref_prefix, xref_id in bio_ontology.get_mappings(prefix, identifier):
        output.add(f'{xref_prefix}:{xref_id}')
    # TODO these should all be in bioontology, eventually
    for xref_curie in self.equivalences.get(curie, []):
        if xref_curie in output:
            continue
        xref_prefix, xref_id = xref_curie.split(':', maxsplit=1)
        # Report each missing (prefix, xref_prefix) pair only once per run.
        if (prefix, xref_prefix) not in BO_MISSING_XREFS:
            BO_MISSING_XREFS.add((prefix, xref_prefix))
            tqdm.write(f'Bioontology is missing mappings from {prefix} to {xref_prefix}')
        output.add(xref_curie)
    # Entrez gene -> HGNC.
    if prefix == 'NCBI gene':
        hgnc_id = get_hgnc_from_entrez(identifier)
        if hgnc_id is not None:
            output.add(f'HGNC:{hgnc_id}')
    # UniProt -> HGNC.
    if prefix == 'UP':
        hgnc_id = get_hgnc_id(identifier)
        if hgnc_id is not None:
            output.add(f'HGNC:{hgnc_id}')
    # PubChem -> CHEBI (with gilda's CHEBI "banana" prefix).
    if prefix == 'PubChem':
        chebi_id = get_chebi_id_from_pubchem(identifier)
        if chebi_id is not None:
            output.add(f'CHEBI:CHEBI:{chebi_id}')
    return output
@staticmethod
def famplex_isa(hgnc_id: str, fplx_id: str) -> bool:
    """Check if an HGNC entity satisfies an isa relation with a FamPlex entity

    Parameters
    ----------
    hgnc_id :
        String of the form f'{namespace}:{id}'
    fplx_id :
        String of the form f'{namespace}:{id}'

    Returns
    -------
    :
        True if hgnc_id corresponds to a valid HGNC grounding,
        fplx_id corresponds to a valid FamPlex grounding, and the
        former isa the latter.
    """
    # TODO can this be swapped directly for the bioontology?
    return famplex.isa('HGNC', hgnc_id, 'FPLX', fplx_id)
def isa(self, curie_1: str, curie_2: str) -> bool:
    """True if curie_1 satisfies an isa relationship with curie_2."""
    ns1, id1 = curie_1.split(':', maxsplit=1)
    ns2, id2 = curie_2.split(':', maxsplit=1)
    # TODO did we need to keep some processing on the IDs?
    # Delegate entirely to the INDRA bio-ontology's isa relation.
    return bio_ontology.isa(ns1, id1, ns2, id2)
def top_correct(self, row, disamb=True) -> bool:
    """True if the top-ranked grounding matches a curated grounding."""
    scored = row.groundings if disamb else row.groundings_no_context
    if not scored:
        return False
    top_curie = scored[0][0]
    references = self.paper_level_grounding[(row.don_article, row.text)]
    return top_curie in references
def exists_correct(self, row, disamb: bool = True) -> bool:
    """True if any returned grounding matches a curated grounding."""
    scored = row.groundings if disamb else row.groundings_no_context
    if not scored:
        return False
    candidates = {curie for curie, _ in scored}
    references = self.paper_level_grounding[(row.don_article, row.text)]
    return len(candidates & references) > 0
def top_correct_w_fplx(self, row, disamb: bool = True) -> bool:
    """Like top_correct, but also accepts FamPlex isa matches."""
    scored = row.groundings if disamb else row.groundings_no_context
    if not scored:
        return False
    top_curie = scored[0][0]
    references = self.paper_level_grounding[(row.don_article, row.text)]
    return any(
        ref == top_curie or self.famplex_isa(ref, top_curie)
        for ref in references
    )
def exists_correct_w_fplx(self, row, disamb: bool = True) -> bool:
    """Like exists_correct, but also accepts FamPlex isa matches."""
    scored = row.groundings if disamb else row.groundings_no_context
    if not scored:
        return False
    candidates = [curie for curie, _ in scored]
    references = self.paper_level_grounding[(row.don_article, row.text)]
    return any(
        ref == candidate or self.famplex_isa(ref, candidate)
        for ref in references
        for candidate in candidates
    )
def top_correct_loose(self, row, disamb=True) -> bool:
    """Like top_correct, but accepts isa matches in either direction."""
    scored = row.groundings if disamb else row.groundings_no_context
    if not scored:
        return False
    top_curie = scored[0][0]
    references = self.paper_level_grounding[(row.don_article, row.text)]
    return any(
        ref == top_curie
        or self.isa(ref, top_curie)
        or self.isa(top_curie, ref)
        for ref in references
    )
def exists_correct_loose(self, row, disamb=True) -> bool:
    """Like exists_correct, but accepts isa matches in either direction."""
    scored = row.groundings if disamb else row.groundings_no_context
    if not scored:
        return False
    candidates = [curie for curie, _ in scored]
    references = self.paper_level_grounding[(row.don_article, row.text)]
    return any(
        ref == candidate
        or self.isa(ref, candidate)
        or self.isa(candidate, ref)
        for ref in references
        for candidate in candidates
    )
def _evaluate_gilda_performance(self):
    """Calculate statistics showing Gilda's performance on corpus

    Directly updates internal dataframe
    """
    print("Evaluating performance...")
    df = self.processed_data
    # Strict / FamPlex-aware / loose correctness of the top-ranked
    # grounding, with context-based disambiguation.
    df.loc[:, 'top_correct'] = df.apply(self.top_correct, axis=1)
    df.loc[:, 'top_correct_w_fplx'] = df.apply(self.top_correct_w_fplx, axis=1)
    df.loc[:, 'top_correct_loose'] = df.apply(self.top_correct_loose, axis=1)
    # Whether any returned grounding (not just the top one) is correct.
    df.loc[:, 'exists_correct'] = df.apply(self.exists_correct, axis=1)
    df.loc[:, 'exists_correct_w_fplx'] = df. \
        apply(self.exists_correct_w_fplx, axis=1)
    df.loc[:, 'exists_correct_loose'] = df. \
        apply(self.exists_correct_loose, axis=1)
    # The same top-grounding metrics without context disambiguation.
    df.loc[:, 'top_correct_no_context'] = df. \
        apply(lambda row: self.top_correct(row, False), axis=1)
    df.loc[:, 'top_correct_w_fplx_no_context'] = df. \
        apply(lambda row: self.top_correct_w_fplx(row, False), axis=1)
    df.loc[:, 'top_correct_loose_no_context'] = df. \
        apply(lambda row: self.top_correct_loose(row, False), axis=1)
    # Whether Gilda returned any grounding at all for the text.
    df.loc[:, 'Has Grounding'] = df.groundings. \
        apply(lambda x: len(x) > 0)
    print("Finished evaluating performance...")
def get_results_tables(
    self,
    match: Optional[str] = 'loose',
    with_context: bool = True,
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Get tables of results.

    Parameters
    ----------
    match :
        One of 'strict', 'w_fplex', or 'loose'. 'strict' only counts
        a Gilda grounding as a match if it is an exact match or equivalent
        to at least one of the curated groundings for the entry
        (some entries have multiple equivalent curated groundings).
        'w_fplex' also counts a Gilda grounding as a match if the curated
        grounding has an HGNC equivalent and satisfies an isa relationship
        with the Gilda grounding. 'loose' counts the pair x, y of a curated
        grounding and a Gilda grounding as a match if x isa y or y isa x
        within the FPLX, MESH, and GO ontologies, or within the dictionary
        of isa_relations.
    with_context :
        If True (default), use the scores obtained with context-based
        disambiguation for the "top grounding correct" column; otherwise
        use the no-context scores.

    Returns
    -------
    counts_table : py:class:`pandas.DataFrame`
    precision_recall : py:class:`pandas.DataFrame`
    disamb_table : py:class:`pandas.DataFrame`

    Raises
    ------
    ValueError
        If ``match`` is not one of the accepted values.
    RuntimeError
        If Gilda groundings have not been computed yet.
    """
    if match not in ['strict', 'w_fplex', 'loose']:
        # Fixed: the message previously advertised 'w_famplex', which is
        # not a value accepted by the check above.
        raise ValueError("match must be one of 'strict', 'w_fplex', or"
                         " 'loose'.")
    df = self.processed_data
    if 'top_correct' not in df.columns:
        raise RuntimeError('Gilda groundings have not been computed')
    res_df = df[['entity_type', 'top_correct', 'top_correct_no_context',
                 'exists_correct', 'top_correct_w_fplx',
                 'top_correct_w_fplx_no_context', 'exists_correct_w_fplx',
                 'top_correct_loose', 'top_correct_loose_no_context',
                 'exists_correct_loose', 'Has Grounding']].copy()
    res_df.loc[:, 'Total'] = True
    total = res_df.drop('entity_type', axis=1).sum()
    total = total.to_frame().transpose()
    total.loc[:, 'entity_type'] = 'Total'
    stats = res_df.groupby('entity_type', as_index=False).sum()
    stats = stats[stats['entity_type'] != 'unknown']
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent.
    stats = pd.concat([stats, total], ignore_index=True)
    stats.loc[:, stats.columns[1:]] = stats[stats.columns[1:]].astype(int)
    if match == 'strict':
        score_cols = ['top_correct', 'exists_correct']
    else:
        score_cols = [f'top_correct_{match}', f'exists_correct_{match}']
    if not with_context:
        # Only the "top correct" criterion has a no-context variant.
        score_cols[0] = score_cols[0] + '_no_context'
    cols = ['entity_type'] + score_cols + ['Has Grounding', 'Total']
    counts_table = deepcopy(stats[cols])
    new_column_names = ['Entity Type', 'Correct', 'Exists Correct',
                        'Has Grounding', 'Total']
    counts_table.columns = new_column_names
    precision_recall = pd.DataFrame(
        index=stats.index,
        columns=[
            'Entity Type',
            'Precision',
            'Exists Correct PR',
            'Recall',
            'Exists Correct RC',
        ],
    )
    precision_recall.loc[:, 'Entity Type'] = counts_table['Entity Type']
    # Precision uses entries where Gilda produced any grounding as the
    # denominator; recall uses all entries.
    precision_recall.loc[:, 'Precision'] = \
        round(counts_table['Correct'] /
              counts_table['Has Grounding'], 3)
    precision_recall.loc[:, 'Exists Correct PR'] = \
        round(counts_table['Exists Correct'] /
              counts_table['Has Grounding'], 3)
    precision_recall.loc[:, 'Recall'] = round(counts_table['Correct'] /
                                              counts_table['Total'], 3)
    precision_recall.loc[:, 'Exists Correct RC'] = \
        round(counts_table['Exists Correct'] / counts_table['Total'], 3)
    precision_recall.loc[:, 'Correct F1'] = \
        f1(precision_recall.loc[:, 'Precision'],
           precision_recall.loc[:, 'Recall'])
    precision_recall.loc[:, 'Exists Correct F1'] = \
        f1(precision_recall.loc[:, 'Exists Correct PR'],
           precision_recall.loc[:, 'Exists Correct RC'])
    cols = ['entity_type', 'top_correct_loose_no_context',
            'top_correct_loose', 'exists_correct', 'Total']
    new_column_names = ['Entity Type', 'Correct', 'Correct (disamb)',
                        'Exists Correct', 'Total']
    # Copy to avoid mutating a view of ``stats`` when renaming columns.
    disamb_table = stats[cols].copy()
    disamb_table.columns = new_column_names
    return counts_table, precision_recall, disamb_table
def get_taxonomy_for_pmid(pmid: str) -> Set[str]:
    """Return taxonomy IDs implied by a paper's MeSH annotations.

    Both direct MeSH annotations and their MESH parents are mapped via
    ``mesh_to_taxonomy``. Returns an empty set for a missing PMID or when
    no annotations are available.
    """
    if not pmid:
        return set()
    import time
    tqdm.write(f'Looking up annotations for pmid:{pmid}')
    # Crude rate limiting for the PubMed API.
    time.sleep(2)
    mesh_annots = pubmed_client.get_mesh_annotations(pmid)
    if mesh_annots is None:
        return set()
    mesh_ids = {annot['mesh'] for annot in mesh_annots}
    taxonomy_ids = set()
    for mesh_id in mesh_ids:
        if mesh_id in mesh_to_taxonomy:
            taxonomy_ids.add(mesh_to_taxonomy[mesh_id])
        # Parents can map to a taxon even when the annotation itself doesn't.
        for _, parent_id in bio_ontology.get_parents('MESH', mesh_id):
            if parent_id in mesh_to_taxonomy:
                taxonomy_ids.add(mesh_to_taxonomy[parent_id])
    tqdm.write('-----')
    tqdm.write('PMID: %s' % pmid)
    for mesh_annot in mesh_annots:
        tqdm.write(mesh_annot['text'])
    tqdm.write('Taxonomy IDs: %s' % taxonomy_ids)
    tqdm.write('-----')
    return taxonomy_ids
def pubmed_from_pmc(pmc_id):
    """Map a PMC ID (with or without the 'PMC' prefix) to a PMID, or None."""
    normalized = str(pmc_id)
    if not normalized.startswith('PMC'):
        normalized = f'PMC{normalized}'
    ids = pmc_client.id_lookup(normalized, 'pmcid')
    return ids.get('pmid')
#: Namespaces used in Bioc dataset after standardization
bioc_nmspaces = ['UP', 'NCBI gene', 'Rfam', 'CHEBI', 'PubChem', 'GO',
                 'CL', 'CVCL', 'UBERON', 'NCBI taxon']
#: Mapping of namespaces to row and column names. Namespaces not
#: included will be used as row and column names unmodified.
nmspace_displaynames = {
    'UP': 'Uniprot', 'NCBI gene': 'Entrez',
    'PubChem': 'PubChem', 'CL': 'Cell Ontology',
    'CVCL': 'Cellosaurus', 'UBERON': 'Uberon',
    'FPLX': 'Famplex'
}
def get_famplex_members():
    """Map each FamPlex entity to the set of its individual members.

    HGNC member IDs are updated to their current HGNC IDs first; members
    whose ID cannot be brought up to date are skipped.
    """
    from indra.databases import hgnc_client
    children = defaultdict(set)
    for entity in famplex.load_entities():
        for db_ns, db_id in famplex.individual_members('FPLX', entity):
            if db_ns == 'HGNC':
                db_id = hgnc_client.get_current_hgnc_id(db_id)
            if db_id:
                children[entity].add('%s:%s' % (db_ns, db_id))
    return dict(children)
def get_display_name(ns: str) -> str:
    """Get the row/column display name associated with a namespace.

    Falls back to the namespace itself when no display name is declared
    in ``nmspace_displaynames``.
    """
    # dict.get with a default replaces the conditional-expression lookup.
    return nmspace_displaynames.get(ns, ns)
def f1(precision: float, recall: float) -> float:
    """Calculate the F1 score (harmonic mean of precision and recall).

    Also works elementwise on pandas Series, as used when building the
    precision/recall table.
    """
    denominator = precision + recall
    # Guard the scalar zero case: 0/0 would raise ZeroDivisionError.
    # (pandas Series division yields NaN instead and is left untouched.)
    if isinstance(denominator, (int, float)) and denominator == 0:
        return 0.0
    return 2 * precision * recall / denominator
@click.command()
@click.option(
    '--data',
    type=click.Path(dir_okay=True, file_okay=False),
    default=os.path.join(HERE, 'data'),
)
@click.option(
    '--results',
    type=click.Path(dir_okay=True, file_okay=False),
    default=os.path.join(HERE, 'results'),
)
def main(data: str, results: str):
    """Run this script to evaluate gilda on the BioCreative VI BioID corpus.

    It has two optional arguments, --data and --results, that specify
    the path to the directory with necessary data and the path to the
    directory where results will be stored. The directory at the data
    path must contain a folder BioIDtraining_2 containing the BioID
    training corpus, and can optionally contain a file equivalences.json
    serializing dictionaries of equivalence and isa relations between
    groundings. Results files will be added to the results directory in
    timestamped files.

    The data can be downloaded from
    https://biocreative.bioinformatics.udel.edu/media/store/files/2017/BioIDtraining_2.tar.gz,
    and needs to be extracted in the benchmarks/data folder.
    """
    data_path = os.path.expandvars(os.path.expanduser(data))
    results_path = os.path.expandvars(os.path.expanduser(results))
    os.makedirs(data_path, exist_ok=True)
    os.makedirs(results_path, exist_ok=True)
    # Curated equivalence/isa relations are optional; fall back to none.
    try:
        with open(os.path.join(data_path, 'equivalences.json')) as f:
            equivalences = json.load(f)
    except FileNotFoundError:
        equivalences = {}
    benchmarker = BioIDBenchmarker(equivalences=equivalences)
    benchmarker.ground_entities_with_gilda()
    print("Constructing mappings table...")
    mappings_table, mappings_table_unique = benchmarker.get_mappings_tables()
    print("Constructing results table...")
    counts, precision_recall, disamb_table = \
        benchmarker.get_results_tables(match='strict')
    print(precision_recall.to_markdown(index=False))
    # Generate output document
    caption0 = dedent(f"""\
        # Gilda Benchmarking
        Bio-ontology: v{bio_ontology.version}
        """)
    caption1 = dedent("""\
        ## Table 1
        Mapping of groundings for entities in BioID corpus into namespaces used by
        Gilda. Count is by entries in corpus with groundings being counted multiple
        times if they occur in more than one entry. Some entries contain multiple
        equivalent curated groundings, leading to a discrepancy between the counts
        shown here and those in the other tables.
        """)
    table1 = mappings_table.to_markdown(index=False)
    caption2 = dedent("""\
        ## Table 2
        Mapping of groundings for entities in BioID corpus into Namespaces used by
        Gilda. Count is by unique groundings, with the same grounding only being
        counted once even if it appears in many entries.
        """)
    table2 = mappings_table_unique.to_markdown(index=False)
    caption3 = dedent("""\
        ## Table 3
        Counts of number of entries in corpus for each entity type, along with
        number of entries where Gilda's top grounding is correct, the number
        where one of Gilda's groundings is correct, and the number of entries
        where Gilda produced some grounding. Context based disambiguation is
        applied and Gilda's groundings are considered correct if there is
        an isa relation between the gold standard grounding and Gilda's or
        vice versa.
        """)
    table3 = counts.to_markdown(index=False)
    caption4 = dedent("""\
        ## Table 4
        Precision and recall values for Gilda performance by entity type. Values
        are given both for the case where Gilda is considered correct only if the
        top grounding matches and the case where Gilda is considered correct if
        any of its groundings match.
        """)
    table4 = precision_recall.to_markdown(index=False)
    caption5 = dedent("""\
        ## Table 5
        Comparison of results with and without context based disambiguation.
        """)
    table5 = disamb_table.to_markdown(index=False)
    # Markdown summary: captions interleaved with their tables.
    output = '\n\n'.join([
        caption0,
        caption1, table1,
        caption2, table2,
        caption3, table3,
        caption4, table4,
        caption5, table5,
    ])
    # Timestamped base name shared by the .md, .tex and .tsv outputs.
    time = datetime.now().strftime('%y-%m-%d-%H:%M:%S')
    outname = f'benchmark_{time}'
    md_path = os.path.join(results_path, f'{outname}.md')
    with open(md_path, 'w') as f:
        f.write(output)
    print(f'Output summary at {md_path}')
    latex_output = dedent(f'''\
        \\section{{Tables}}
        {mappings_table.to_latex(index=False, caption=caption1, label='tab:mappings')}
        {mappings_table_unique.to_latex(index=False, caption=caption2, label='tab:mappings-unique')}
        {counts.to_latex(index=False, caption=caption3, label='tab:counts')}
        {precision_recall.round(3).to_latex(index=False, caption=caption4, label='tab:precision-recall')}
        {disamb_table.to_latex(index=False, caption=caption5, label='tab:disambiguation')}
        ''')
    latex_path = os.path.join(results_path, f'{outname}.tex')
    with open(latex_path, 'w') as file:
        print(latex_output, file=file)
    # Full per-entry results for downstream analysis.
    tsv_path = os.path.join(results_path, f'{outname}.tsv')
    benchmarker.processed_data.to_csv(tsv_path, sep='\t', index=False)
if __name__ == '__main__':
    # NOTE(review): fplx_members is bound as a module-level global here;
    # presumably consumed by evaluation code elsewhere in this module —
    # confirm before moving this into main().
    fplx_members = get_famplex_members()
    main()
|
steppi/gilda | gilda/tests/test_grounder.py | <filename>gilda/tests/test_grounder.py
from gilda.term import Term
from gilda.grounder import Grounder, filter_for_organism
from . import appreq
gr = Grounder()
def test_grounder():
    """Spot-check lookup and grounding scores for KRAS-family synonyms."""
    entries = gr.lookup('kras')
    assert 'assertion' in [e.status for e in entries]
    for entry in entries:
        if entry.status == 'assertion':
            assert entry.id == '6407', entry
    expected_scores = [('kras', 0.9845), ('k-ras', 0.9936),
                       ('KRAS', 1.0), ('bRaf', 0.9936)]
    for text, expected in expected_scores:
        scores = gr.ground(text)
        assert len(scores) == 1, scores
        assert appreq(scores[0].score, expected), scores
def test_grounder_bug():
    """Smoke test: the 'NA' grounding term must not be read back as None."""
    gr.ground('Na')
def test_grounder_num_entries():
    """Check the expected number of lookup entries for ambiguous texts."""
    for text, expected_count in [('NPM1', 4), ('H4', 7)]:
        entries = gr.lookup(text)
        assert len(entries) == expected_count, entries
def test_grounder_depluralize():
    """Plural forms are looked up via their normalized singular text.

    lookup returns all matches with no de-duplication or filtering, so
    identical FPLX entries and a yeast protein entry all appear here.
    """
    entries = gr.lookup('RAFs')
    assert len(entries) == 9, entries
    assert all(entry.norm_text == 'raf' for entry in entries)
def test_disambiguate_adeft():
    """Adeft-based disambiguation annotates every match for 'IR'."""
    matches = gr.disambiguate('IR', gr.ground('IR'), 'Insulin Receptor (IR)')
    for match in matches:
        disamb = match.disambiguation
        assert disamb is not None
        assert disamb['type'] == 'adeft'
        assert disamb['match'] in ('grounded', 'ungrounded')
        assert disamb['score'] is not None
        if match.term.db == 'HGNC' and match.term.id == '6091':
            # The insulin receptor sense is fully supported by the context.
            assert disamb['match'] == 'grounded'
            assert disamb['score'] == 1.0
def test_disambiguate_gilda():
    """Gilda-model disambiguation scores the NDR1 senses by context."""
    matches = gr.disambiguate('NDR1', gr.ground('NDR1'), 'STK38')
    for match in matches:
        disamb = match.disambiguation
        assert disamb['type'] == 'gilda'
        assert disamb['match'] == 'grounded'
        key = (match.term.db, match.term.id)
        if key == ('HGNC', '17847'):
            assert disamb['score'] > 0.99
        if key == ('HGNC', '7679'):
            assert disamb['score'] < 0.01
def test_rank_namespace():
    """The top match for interferon-gamma comes from HGNC."""
    top_match = gr.ground('interferon-gamma')[0]
    assert top_match.term.db == 'HGNC'
def test_aa_synonym():
    """Two-letter amino-acid texts must not ground to the dipeptide entry."""
    for text in ('WN', 'W-N'):
        grounded_ids = {m.term.id for m in gr.ground(text)}
        assert '141447' not in grounded_ids
def test_organism_filter():
    """Filtering keeps terms of the highest-priority organism plus None."""
    dummy = 'dummy'
    human = Term('x', dummy, dummy, dummy, dummy, dummy, dummy, '9606')
    mouse = Term('x', dummy, dummy, dummy, dummy, dummy, dummy, '10090')
    no_org = Term('x', dummy, dummy, dummy, dummy, dummy, dummy, None)
    cases = [
        (['9606'], '9606'),
        (['10090'], '10090'),
        (['10090', '9606'], '10090'),
        (['9606', '10090'], '9606'),
    ]
    for organisms, expected_org in cases:
        terms = filter_for_organism([human, mouse, no_org],
                                    organisms=organisms)
        assert len(terms) == 2, terms
        assert {t.organism for t in terms} == {None, expected_org}
def test_organisms():
    """Grounding results for Raf1 follow the organism priority order."""
    cases = [
        (None, 2, {'9606'}),
        (['10090'], 1, {'10090'}),
        (['9606', '10090'], 2, {'9606'}),
        (['10090', '9606'], 1, {'10090'}),
    ]
    for organisms, expected_len, expected_orgs in cases:
        if organisms is None:
            matches = gr.ground('Raf1')
        else:
            matches = gr.ground('Raf1', organisms=organisms)
        assert len(matches) == expected_len, len(matches)
        assert {m.term.organism for m in matches} == expected_orgs, matches
def test_nonhuman_gene_synonyms():
    """Mouse Tau grounds to the mouse UniProt entry."""
    top_term = gr.ground('Tau', organisms=['10090'])[0].term
    assert top_term.db == 'UP', top_term
    assert top_term.id == 'P10637', top_term
def test_uniprot_gene_synonym():
    """A UniProt-sourced synonym still resolves to the HGNC entry."""
    top_term = gr.ground('MEKK2')[0].term
    assert top_term.db == 'HGNC', top_term
    assert top_term.entry_name == 'MAP3K2'
def test_greek_to_spelled_out():
    """Greek letters in the input are normalized before matching."""
    matches = gr.ground('interferon-γ')
    assert matches
    assert matches[0].term.entry_name == 'IFNG'
steppi/gilda | gilda/resources/__init__.py | <filename>gilda/resources/__init__.py<gh_stars>0
import os
import boto3
import pystow
import logging
import botocore
from gilda import __version__
logger = logging.getLogger(__name__)
HERE = os.path.abspath(os.path.dirname(__file__))
# The MeSH mappings resource ships with the package itself.
MESH_MAPPINGS_PATH = os.path.join(HERE, 'mesh_mappings.tsv')
# Larger resources are cached in a pystow-managed, version-specific folder.
resource_dir = pystow.join('gilda', __version__)
GROUNDING_TERMS_BASE_NAME = 'grounding_terms.tsv'
GROUNDING_TERMS_PATH = os.path.join(resource_dir, GROUNDING_TERMS_BASE_NAME)
# Popular organisms per UniProt, see
# https://www.uniprot.org/help/filter_options
popular_organisms = ['9606', '10090', '10116', '9913', '7955', '7227',
                     '6239', '44689', '3702', '39947', '83333', '224308',
                     '559292']
# NOTE: these are not all exact mappings..
# Several mappings here are to the closest match which works correctly
# in this setting but isn't generally speaking a valid xref.
taxonomy_to_mesh = {
    '9606': 'D006801',
    '10090': 'D051379',
    '10116': 'D051381',
    '9913': 'D002417',
    '7955': 'D015027',
    '7227': 'D004331',
    '6239': 'D017173',
    '44689': 'D004023',
    '3702': 'D017360',
    '39947': 'D012275',
    '83333': 'D048168',
    '224308': 'D001412',
    '559292': 'D012441',
}
# Inverse lookup derived from the mapping above.
mesh_to_taxonomy = {v: k for k, v in taxonomy_to_mesh.items()}
def _download_from_s3(path, base_name):
    """Download a versioned Gilda resource file from S3 into *path*."""
    # Anonymous (unsigned) access suffices for the public gilda bucket.
    config = botocore.client.Config(signature_version=botocore.UNSIGNED)
    s3 = boto3.client('s3', config=config)
    transfer_config = boto3.s3.transfer.TransferConfig(use_threads=False)
    out_file = os.path.join(path, base_name)
    full_key = '%s/%s' % (__version__, base_name)
    s3.download_file('gilda', full_key, out_file, Config=transfer_config)
    return out_file
def get_grounding_terms():
    """Return the local path to the grounding terms file.

    Downloads the file from S3 into the versioned cache on first use.
    """
    if not os.path.exists(GROUNDING_TERMS_PATH):
        logger.info('Downloading grounding terms from S3.')
        out_file = _download_from_s3(resource_dir, GROUNDING_TERMS_BASE_NAME)
        logger.info('Saved grounding terms into: %s' % out_file)
    return GROUNDING_TERMS_PATH
def get_gilda_models():
    """Return the local path to the disambiguation models.

    Downloads the pickle from S3 into the versioned cache on first use.
    """
    base_name = 'gilda_models.pkl'
    model_path = os.path.join(resource_dir, base_name)
    if not os.path.exists(model_path):
        logger.info('Downloading disambiguation models from S3.')
        out_file = _download_from_s3(resource_dir, base_name)
        logger.info('Saved disambiguation models into: %s' % out_file)
    return model_path
|
steppi/gilda | gilda/app/__init__.py | import argparse
from .app import app
def main():
    """Parse command line options and launch the grounding web app."""
    parser = argparse.ArgumentParser(description='Run the grounding app.')
    parser.add_argument('--host', default='0.0.0.0')
    parser.add_argument('--port', default=8001, type=int)
    args = parser.parse_args()
    app.run(host=args.host, port=args.port, threaded=False)
|
steppi/gilda | scripts/git_patch_process.py | <filename>scripts/git_patch_process.py
"""This is a helper script to edit the git patch created by
changes in grounding_cell_designer.py on the COVID-19 Disease Map XMLs.
The edited patch can then be applied and committed to that repo."""
import sys
def get_blocks(fname):
    """Split a git patch into blocks that can be independently filtered.

    A block is either a file header (a ``diff --git`` line plus everything
    up to the first hunk) or a single hunk (a ``@@`` line plus its body,
    including any trailing ``\\ No newline`` marker).

    NOTE(review): the original streaming loop contained dead code
    (``if not lines: break`` — an iterator object is always truthy) and
    could consume a hunk header while breaking out of the previous hunk's
    scan; this single-pass rewrite partitions every line exactly once.
    """
    parts = []
    with open(fname, 'r') as fh:
        for line in fh:
            # Hunk and file headers always open a new block; anything
            # before the first header also opens one so no line is lost.
            if line.startswith('diff --git') or line.startswith('@@') \
                    or not parts:
                parts.append([line])
            else:
                parts[-1].append(line)
    return parts
def filter_blocks(blocks):
    """Filter out spurious diffs caused by XML deserialization/serialization.

    Blocks whose removed lines are root-element rewrites, or which contain
    XML-escaped apostrophe entities, are churn from the XML round-trip
    rather than real grounding changes.
    """
    # Removed-line prefixes that indicate pure serialization churn.
    spurious_prefixes = ('-<rdf:RDF', '-<math', '-<sbml', '-<body')
    new_blocks = []
    for block in blocks:
        if any(l.startswith(spurious_prefixes) for l in block):
            continue
        # NOTE(review): this condition was mangled in transit into a bare
        # triple-quote (a syntax error); filtering lines containing the
        # XML-escaped apostrophe entity is the apparent intent — confirm.
        if any('&#39;' in l for l in block):
            continue
        new_blocks.append(block)
    return new_blocks
def dump_blocks(blocks, fname):
    """Dump filtered diff blocks back into a patch file."""
    with open(fname, 'w') as fh:
        for block in blocks:
            fh.writelines(block)
if __name__ == '__main__':
    # Workflow:
    # 1. run the grounding_cell_designer.py script
    # 2. in the C19DM repo run git diff --binary > patch.diff
    # 3. run this script on patch.diff
    # 4. git apply patch_edited.diff
    patch_path = sys.argv[1]
    blocks = get_blocks(patch_path)
    blocks = filter_blocks(blocks)
    # Output file name: strip the '.diff' suffix, append '_edited.diff'.
    dump_blocks(blocks, patch_path[:-5] + '_edited.diff')
|
steppi/gilda | gilda/tests/test_api.py | from gilda.tests import appreq
from gilda.api import *
def test_api_ground():
    """Module-level ground() returns expected scores, terms and URLs."""
    kras_scores = ground('kras')
    assert appreq(kras_scores[0].score, 0.9845), kras_scores
    ros_scores = ground('ROS', 'reactive oxygen')
    assert ros_scores[0].term.db == 'MESH', ros_scores
    assert ros_scores[0].url == 'https://identifiers.org/mesh:D017382'
def test_get_models():
    """There are hundreds of disambiguation models, including STK1."""
    model_names = get_models()
    assert len(model_names) > 500
    assert 'STK1' in model_names
def test_get_names():
    """KRAS (HGNC:6407) has several synonyms including 'K-Ras'."""
    synonyms = get_names('HGNC', '6407')
    assert len(synonyms) > 5, synonyms
    assert 'K-Ras' in synonyms
def test_organisms():
    """Organism priority ordering shapes the grounding results."""
    # Default human gene match
    matches = ground('SMN1')
    assert len(matches) == 1
    assert (matches[0].term.db, matches[0].term.id) == ('HGNC', '11117')
    # Prioritizing human explicitly gives the same match
    matches = ground('SMN1', organisms=['9606', '10090'])
    assert len(matches) == 1
    assert (matches[0].term.db, matches[0].term.id) == ('HGNC', '11117')
    # Prioritize mouse: SMN is grounded correctly
    matches = ground('SMN', organisms=['10090', '9606'])
    assert len(matches) == 2, matches
    assert (matches[0].term.db, matches[0].term.id) == ('UP', 'P63163')
    # SMN with human prioritized yields three bad groundings
    matches = ground('SMN', organisms=['9606', '10090'])
    assert len(matches) == 3, matches
    assert all(m.term.organism == '9606' for m in matches)
    # TDRD16A with mouse prioritized finds no mouse match and falls back
    # to the human gene grounding
    matches = ground('TDRD16A', organisms=['10090', '9606'])
    assert len(matches) == 1, matches
    assert (matches[0].term.db, matches[0].term.id) == ('HGNC', '11117'), \
        matches
|
steppi/gilda | gilda/api.py | <filename>gilda/api.py<gh_stars>10-100
__all__ = ['ground', 'get_models', 'get_names']
from gilda.grounder import Grounder
class GrounderInstance(object):
    """Lazily instantiated wrapper around a single shared Grounder."""

    def __init__(self):
        # The Grounder is expensive to build, so defer it to first use.
        self.grounder = None

    def get_grounder(self):
        """Return the underlying Grounder, creating it on first access."""
        if self.grounder is None:
            self.grounder = Grounder()
        return self.grounder

    def ground(self, text, context=None, organisms=None):
        """Delegate grounding to the lazily created Grounder."""
        return self.get_grounder().ground(text, context=context,
                                          organisms=organisms)

    def get_models(self):
        """Delegate model listing to the lazily created Grounder."""
        return self.get_grounder().get_models()

    def get_names(self, db, id, status=None, source=None):
        """Delegate name lookup to the lazily created Grounder."""
        return self.get_grounder().get_names(db, id, status=status,
                                             source=source)
grounder = GrounderInstance()
def ground(text, context=None, organisms=None):
    """Return a list of scored matches for a text to ground.

    Parameters
    ----------
    text : str
        The entity text to be grounded.
    context : Optional[str]
        Any additional text that serves as context for disambiguating the
        given entity text, used if a model exists for disambiguating the
        given text.
    organisms : Optional[list[str]]
        A priority-ordered list of taxonomy IDs; passed through to the
        underlying Grounder to prioritize matches by organism.

    Returns
    -------
    list[gilda.grounder.ScoredMatch]
        A list of ScoredMatch objects representing the groundings.
    """
    return grounder.ground(text=text, context=context, organisms=organisms)
def get_models():
    """Return a list of entity texts for which disambiguation models exist.

    Returns
    -------
    list[str]
        The entity texts for which a disambiguation model is available.
    """
    return grounder.get_models()
def get_names(db, id, status=None, source=None):
    """Return a list of entity texts corresponding to a given database ID.

    Parameters
    ----------
    db : str
        The database in which the ID is an entry, e.g., HGNC.
    id : str
        The ID of an entry in the database.
    status : Optional[str]
        If given, only entity texts with the given status e.g., "synonym"
        are returned.
    source : Optional[str]
        If given, only entity texts from the given source e.g., "uniprot"
        are returned.

    Returns
    -------
    list[str]
        The entity texts matching the query.
    """
    return grounder.get_names(db, id, status=status, source=source)
|
class Term(object):
    """Represents a text entry corresponding to a grounded term.

    Attributes
    ----------
    norm_text : str
        The normalized text corresponding to the text entry, used for lookups.
    text : str
        The text entry itself.
    db : str
        The database / name space corresponding to the grounded term.
    id : str
        The identifier of the grounded term within the database / name space.
    entry_name : str
        The standardized name corresponding to the grounded term.
    status : str
        The relationship of the text entry to the grounded term, e.g., synonym.
    source : str
        The source from which the term was obtained.
    organism : Optional[str]
        The taxonomy identifier of the organism the term belongs to, if any.
    """
    def __init__(self, norm_text, text, db, id, entry_name, status, source,
                 organism=None):
        if not text:
            raise ValueError('Text for Term cannot be empty')
        self.norm_text = norm_text
        self.text = text
        self.db = db
        # IDs are normalized to strings regardless of input type.
        self.id = str(id)
        self.entry_name = entry_name
        self.status = status
        self.source = source
        self.organism = organism

    def __str__(self):
        return 'Term(%s,%s,%s,%s,%s,%s,%s,%s)' % (
            self.norm_text, self.text, self.db, self.id, self.entry_name,
            self.status, self.source, self.organism)

    def __repr__(self):
        return str(self)

    def to_json(self):
        """Return the term serialized into a JSON dict."""
        js = {
            'norm_text': self.norm_text,
            'text': self.text,
            'db': self.db,
            'id': self.id,
            'entry_name': self.entry_name,
            'status': self.status,
            'source': self.source,
        }
        # The organism key is only included when an organism is set.
        if self.organism:
            js['organism'] = self.organism
        return js

    def to_list(self):
        """Return the term serialized into a list of values."""
        return [self.norm_text, self.text, self.db, self.id,
                self.entry_name, self.status, self.source,
                self.organism]

    def get_identifiers_url(self):
        """Return the identifiers.org URL for this term's grounding."""
        return get_identifiers_url(self.db, self.id)

    # Backwards-compatible alias: the method was originally published under
    # this misspelled name, so keep it for existing callers.
    get_idenfiers_url = get_identifiers_url
def get_identifiers_url(db, id):
    """Return the identifiers.org URL for a (db, id) pair.

    IDs that already embed a namespace prefix (e.g. 'CHEBI:1234') are
    emitted under that embedded prefix; IDs with more than one colon
    yield None.
    """
    template = 'https://identifiers.org/{db}:{id}'
    if db == 'UP':
        db = 'uniprot'
    parts = id.split(':')
    if len(parts) == 1:
        return template.format(db=db.lower(), id=id)
    if len(parts) == 2:
        prefix, local_id = parts
        return template.format(db=prefix.upper(), id=local_id)
    return None
|
steppi/gilda | benchmarks/medmentions_responsiveness_benchmark.py | # -*- coding: utf-8 -*-
"""This script measures the responsiveness (i.e., speed) of Gilda on the
MedMentions corpus in three settings: when used as a python package,
a local web service or through the remote public web service.
"""
import pathlib
import random
import time
from textwrap import dedent
from typing import Optional
import click
import gilda
import matplotlib.pyplot as plt
import pandas as pd
import requests
import seaborn as sns
from gilda.api import grounder
from more_click import force_option, verbose_option
from tqdm import tqdm, trange
from tqdm.contrib.logging import logging_redirect_tqdm
from medmentions import iterate_corpus
HERE = pathlib.Path(__file__).parent.resolve()
RESULTS = HERE.joinpath("results")
RESULTS.mkdir(exist_ok=True, parents=True)
# Output artifacts: raw per-call timings, aggregated statistics (TSV and
# LaTeX), and box-plot figures (SVG and PDF).
RESULTS_PATH = RESULTS.joinpath("medmentions_responsiveness.tsv")
RESULTS_AGG_PATH = RESULTS.joinpath("medmentions_responsiveness_aggregated.tsv")
RESULTS_AGG_TEX_PATH = RESULTS.joinpath("medmentions_responsiveness_aggregated.tex")
FIG_PATH = RESULTS.joinpath("medmentions_responsiveness.svg")
FIG_PDF_PATH = RESULTS.joinpath("medmentions_responsiveness.pdf")
def ground_package(text, **_kwargs):
    # Ground via the in-process Python package, ignoring any context.
    return gilda.ground(text)
def ground_package_context(text, context):
    # Ground via the in-process Python package, with disambiguation context.
    return gilda.ground(text, context=context)
def ground_app_local(text, **_kwargs):
    # Ground via a locally running web service, ignoring any context.
    return requests.post("http://localhost:8001/ground", json={"text": text}).json()
def ground_app_local_context(text, context):
    # Ground via a locally running web service, with disambiguation context.
    return requests.post(
        "http://localhost:8001/ground", json={"text": text, "context": context}
    ).json()
def ground_app_remote(text, **_kwargs):
    # Ground via the public remote web service, ignoring any context.
    return requests.post(
        "http://grounding.indra.bio/ground", json={"text": text}
    ).json()
def ground_app_remote_context(text, context):
    # Ground via the public remote web service, with disambiguation context.
    return requests.post(
        "http://grounding.indra.bio/ground", json={"text": text, "context": context}
    ).json()
#: A list of benchmarks to run with three columns:
#: type, uses context, function
#: Each scenario is timed separately via run_trial below.
FUNCTIONS = [
    ("Python package", False, ground_package),
    ("Python package", True, ground_package_context),
    ("Local web app", False, ground_app_local),
    ("Local web app", True, ground_app_local_context),
    ("Public web app", False, ground_app_remote),
    ("Public web app", True, ground_app_remote_context),
]
def run_trial(
    *, trials, corpus, func, desc: Optional[str] = None, chunk: Optional[int] = None
):
    """Time ``func`` over (a shuffle of) the corpus for several trials.

    Returns a list of (trial index, number of matches, seconds) tuples,
    one per grounded example.
    """
    timings = []
    for trial in trange(trials, desc=desc):
        random.shuffle(corpus)
        test_corpus = corpus[:chunk] if chunk else corpus
        example_it = tqdm(test_corpus, desc="Examples", leave=False)
        # Local names chosen so the corpus tuple's start/end offsets don't
        # collide with the timing variable.
        for document_id, abstract, umls_id, text, begin, finish, types \
                in example_it:
            with logging_redirect_tqdm():
                tic = time.time()
                matches = func(text, context=abstract)
                timings.append((trial, len(matches), time.time() - tic))
    return timings
def build(trials: int, chunk: Optional[int] = None) -> pd.DataFrame:
    """Run every benchmark scenario and collect raw timing rows.

    Parameters
    ----------
    trials :
        Number of shuffled passes over the corpus per scenario.
    chunk :
        Optional subsample size per trial; the full corpus is used if None.

    Returns
    -------
    pd.DataFrame
        Columns: type, context, trial, matches, duration (seconds).
    """
    click.secho("Preparing MedMentions corpus")
    corpus = list(iterate_corpus())
    # Warm-ups ensure lazy resource loading/caching doesn't skew timings.
    click.secho("Warming up python grounder")
    grounder.get_grounder()
    click.secho("Warming up local api grounder")
    ground_app_local_context("ER", context="Calcium is released from the ER.")
    click.secho("Warming up remote api grounder")
    ground_app_remote_context("ER", context="Calcium is released from the ER.")
    rows = []
    for tag, uses_context, func in FUNCTIONS:
        rv = run_trial(
            trials=trials,
            chunk=chunk,
            corpus=corpus,
            func=func,
            desc=f"{tag}{' with context' if uses_context else ''} trial",
        )
        rows.extend((tag, uses_context, *row) for row in rv)
    df = pd.DataFrame(rows, columns=["type", "context", "trial", "matches", "duration"])
    return df
@click.command()
@click.option("--trials", type=int, default=2, show_default=True)
@click.option(
    "--chunk",
    type=int,
    help="Subsample size from full corpus. Defaults to full corpus if not given.",
)
@verbose_option
@force_option
def main(trials: int, chunk: Optional[int], force: bool):
    """Benchmark Gilda responsiveness and write tables and figures."""
    # Reuse cached raw timings unless --force is given.
    if RESULTS_PATH.is_file() and not force:
        df = pd.read_csv(RESULTS_PATH, sep="\t")
    else:
        df = build(trials=trials, chunk=chunk)
        df.to_csv(RESULTS_PATH, sep="\t", index=False)
    # convert from seconds/response to responses/second
    df["duration"] = df["duration"].map(lambda x: 1 / x)
    # Aggregate mean and standard deviation per (type, context) scenario.
    _grouped = df[["type", "context", "duration"]].groupby(["type", "context"])
    agg_mean_df = _grouped.mean()
    agg_mean_df.rename(columns={"duration": "duration_mean"}, inplace=True)
    agg_std_df = _grouped.std()
    agg_std_df.rename(columns={"duration": "duration_std"}, inplace=True)
    agg_df = pd.merge(agg_mean_df, agg_std_df, left_index=True, right_index=True)
    agg_df = agg_df.round(1)
    agg_df.to_csv(RESULTS_AGG_PATH, sep="\t")
    agg_df.to_latex(
        RESULTS_AGG_TEX_PATH,
        label="tab:medmentions-responsiveness-benchmark",
        caption=dedent(
            f"""\
            Benchmarking of the responsiveness of the Gilda service when running synchronously
            through its Python package, when run locally as a web service, and when run remotely
            as a web service. Each scenario was also tested with and without context added.
            The Python usage had the fastest time due to the lack of overhead from
            network communication. The local web service performed better than the remote one
            for the same reason in addition to the possibility of external users requesting at the
            same time.
            """
        ),
    )
    # Log-scale box plot of responses/second per scenario.
    fig, ax = plt.subplots(figsize=(6, 3))
    sns.boxplot(data=df, y="duration", x="type", hue="context", ax=ax)
    ax.set_title("Gilda Responsiveness Benchmark on MedMentions")
    ax.set_yscale("log")
    ax.set_ylabel("Responses per Second")
    ax.set_xlabel("")
    fig.savefig(FIG_PATH)
    fig.savefig(FIG_PDF_PATH)
|
steppi/gilda | scripts/grounding_cell_designer.py | """This script can process a set of Cell Designer XML files that are
part of the COVID-19 Disease Map project, detect species with missing
grounding, ground these based on their string name with Gilda, and
serialize the changes back into XML files."""
import re
import itertools
from collections import defaultdict
from pathlib import Path
from xml.etree import ElementTree as ET
import click
from indra.databases import identifiers
from tqdm import tqdm
import gilda
# rdf:RDF opening tag carrying the full set of namespace declarations;
# presumably spliced into serialized annotation snippets — confirm use in
# the serialization code further down this script.
rdf_str = (
    b'<rdf:RDF '
    b'xmlns:bqbiol="http://biomodels.net/biology-qualifiers/" '
    b'xmlns:bqmodel="http://biomodels.net/model-qualifiers/" '
    b'xmlns:dc="http://purl.org/dc/elements/1.1/" '
    b'xmlns:dcterms="http://purl.org/dc/terms/" '
    b'xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" '
    b'xmlns:vCard="http://www.w3.org/2001/vcard-rdf/3.0#">'
)
# Prefix -> URI map used by every namespaced XPath query in this script.
namespaces = {'sbml': 'http://www.sbml.org/sbml/level2/version4',
              'celldesigner': 'http://www.sbml.org/2001/ns/celldesigner',
              'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
              'bqbiol': 'http://biomodels.net/biology-qualifiers/',
              'bqmodel': 'http://biomodels.net/model-qualifiers/',
              'vCard': 'http://www.w3.org/2001/vcard-rdf/3.0#',
              'dc': 'http://purl.org/dc/elements/1.1/',
              'xhtml': 'http://www.w3.org/1999/xhtml'}
# Grounding namespaces that annotate a species without identifying it.
irrelevant_grounding_ns = {'pubmed', 'taxonomy', 'doi', 'wikipathways', 'pdb',
                           'intact', 'biogrid', 'pmc'}
# Grounding namespaces that count as a proper identity for a species.
relevant_grounding_ns = {'ncbiprotein', 'ncbigene', 'uniprot', 'obo.go',
                         'obo.chebi', 'pubchem.compound', 'pubchem.substance',
                         'hgnc', 'hgnc.symbol', 'mesh', 'interpro',
                         'refseq', 'ensembl', 'ec-code', 'brenda',
                         'kegg.compound', 'drugbank'}
# Species classes that should not receive new groundings.
irrelevant_classes = {'DEGRADED'}
# Running counter updated as groundings are added.
grounding_stats = {'ngrounding': 0}
def register_all_namespaces(fname):
    """Register every namespace prefix found in an XML file with ElementTree.

    This makes later serialization reuse the original prefixes instead of
    generated ns0/ns1 ones where possible.
    """
    for _, (prefix, uri) in ET.iterparse(fname, events=['start-ns']):
        ET.register_namespace(prefix, uri)
def get_protein_reference(species):
    """Return the CellDesigner protein reference id of a species, if any.

    CellDesigner files nest the proteinReference tag at several possible
    locations, so each candidate path is tried in order; returns None when
    no reference is present.
    """
    candidate_paths = (
        'sbml:annotation/'
        'celldesigner:extension/'
        'celldesigner:speciesIdentity/'
        'celldesigner:proteinReference',
        'celldesigner:annotation/'
        'celldesigner:speciesIdentity/'
        'celldesigner:proteinReference',
        'celldesigner:extension/'
        'celldesigner:speciesIdentity/'
        'celldesigner:proteinReference',
    )
    for path in candidate_paths:
        ref = species.find(path, namespaces=namespaces)
        if ref is not None:
            return ref.text
    return None
def add_groundings(et):
    """Add Gilda groundings to ungrounded species in a CellDesigner tree.

    Iterates over all species (including CellDesigner "included" species),
    skips ones that already carry a relevant grounding directly or via their
    protein reference, grounds the rest by name with Gilda, and inserts the
    top match as an RDF annotation. Returns the modified tree.
    """
    all_species = et.findall('sbml:model/sbml:listOfSpecies/sbml:species',
                             namespaces=namespaces)
    print('%d species found with first method' % len(all_species))
    all_species += et.findall('sbml:model/sbml:annotation/'
                              'celldesigner:extension/'
                              'celldesigner:listOfIncludedSpecies/'
                              'celldesigner:species',
                              namespaces=namespaces)
    print('%d species found after second method' % len(all_species))
    # This is to collect protein reference-based groundings first
    groundings_for_prot_ref = defaultdict(set)
    species_for_prot_ref = defaultdict(set)
    for species in all_species:
        protein_ref_id = get_protein_reference(species)
        if protein_ref_id:
            existing_grounding = get_existing_grounding(species)
            groundings_for_prot_ref[protein_ref_id] |= \
                set(existing_grounding) & relevant_grounding_ns
            species_for_prot_ref[protein_ref_id].add(species.attrib['id'])
    for species in tqdm(all_species, desc='Species', leave=False):
        # Skip if we have a grounding via the protein reference, indirectly
        protein_ref_id = get_protein_reference(species)
        if protein_ref_id and groundings_for_prot_ref.get(protein_ref_id):
            continue
        existing_grounding = get_existing_grounding(species)
        # Important: this is where we decide if we will add any grounding.
        # Here we skip this species if it has any relevant grounding
        if set(existing_grounding) & relevant_grounding_ns:
            continue
        class_tag = species.find(
            'sbml:annotation/celldesigner:extension/'
            'celldesigner:speciesIdentity/celldesigner:class',
            namespaces=namespaces)
        # The class tag may legitimately be absent; default entity_class to
        # None instead of leaving the name unbound (previously a NameError
        # when it was used below without a class tag present).
        entity_class = class_tag.text if class_tag is not None else None
        if entity_class in irrelevant_classes:
            continue
        name = species.attrib.get('name')
        if not name:
            # Species without a name cannot be grounded by string matching.
            continue
        tqdm.write(name)
        tqdm.write(str(entity_class))
        matches = gilda.ground(name)
        if matches:
            species = add_grounding_element(species, entity_class,
                                            matches[0].term.db,
                                            matches[0].term.id)
            # Explicit None check: truth-testing an Element is deprecated
            # (and False for childless elements).
            if species is not None:
                tqdm.write(' '.join((name, matches[0].term.db,
                                     matches[0].term.id,
                                     matches[0].term.entry_name)))
                grounding_stats['ngrounding'] += 1
        tqdm.write('---')
    return et
def get_existing_grounding(species):
    """Collect existing groundings of a species as ``{db_ns: [db_id, ...]}``.

    Searches RDF annotations under both the SBML annotation and the
    CellDesigner notes, across several bqbiol qualifier tags.
    """
    # Others: isHomologTo
    qualifier_tags = ['isDescribedBy', 'isEncodedBy', 'is', 'encodes',
                      'occursIn']
    prefixes = ['sbml:annotation', 'celldesigner:notes/xhtml:html/xhtml:body']
    resource_attr = ('{http://www.w3.org/1999/02/22-rdf-syntax-ns#}'
                     'resource')
    groundings = defaultdict(list)
    for prefix, tag in itertools.product(prefixes, qualifier_tags):
        xpath = ('%s/rdf:RDF/rdf:Description/'
                 'bqbiol:%s/rdf:Bag/rdf:li' % (prefix, tag))
        for element in species.findall(xpath, namespaces=namespaces):
            urn = element.attrib[resource_attr]
            match = re.match(r'urn:miriam:([^:]+):(.+)', urn)
            if match:
                db_ns, db_id = match.groups()
                groundings[db_ns].append(db_id)
            else:
                tqdm.write(f'Unmatched urn: {urn}')
                continue
    return groundings
def add_grounding_element(species, entity_class, db_ns, db_id):
    """Insert an RDF grounding annotation into a species element.

    Returns the modified species element, or None when the grounding is
    deliberately skipped (CHEBI grounding of a PROTEIN species).
    """
    # For genes, if we're grounding a protein, we make the encoding aspect
    # explicit
    if entity_class == 'PROTEIN' and db_ns == 'HGNC':
        bqbiol_tag = 'bqbiol:isEncodedBy'
    # In case a protein is grounded to CHEBI, it's typically a problem, we
    # skip these
    elif entity_class == 'PROTEIN' and db_ns == 'CHEBI':
        return
    else:
        bqbiol_tag = 'bqbiol:is'
    # Nested tag path under which the grounding <rdf:li> must live.
    tag_sequence = [
        'sbml:annotation',
        'rdf:RDF',
        'rdf:Description',
        bqbiol_tag,
        'rdf:Bag',
    ]
    identifiers_ns = identifiers.get_identifiers_ns(db_ns)
    grounding_str = 'urn:miriam:%s:%s' % (identifiers_ns, db_id)
    # Walk down the tag sequence, creating any missing elements on the way.
    # NOTE(review): new elements are created with prefixed tag strings
    # (e.g. 'rdf:Bag'); this appears to rely on the textual post-processing
    # in dump() to produce valid namespaced XML — confirm before reuse.
    root = species
    for tag in tag_sequence:
        element = root.find(tag, namespaces=namespaces)
        if element is not None:
            root = element
        else:
            new_element = ET.Element(tag)
            new_element.text = '\n'
            new_element.tail = '\n'
            root.append(new_element)
            root = new_element
    li = ET.Element('rdf:li', attrib={'rdf:resource': grounding_str})
    root.append(li)
    return species
def dump(et, fname):
    """Serialize the tree and write it, restoring CellDesigner-style markup.

    The serialized bytes are post-processed with textual replacements to
    drop generated ns0/ns1 prefixes and to re-insert the full RDF namespace
    declarations expected by CellDesigner.
    """
    body = ET.tostring(et.getroot(), xml_declaration=False,
                       encoding='UTF-8')
    xml_str = b'<?xml version="1.0" encoding="UTF-8"?>\n' + body
    replacements = [
        (b'ns0:', b''),
        (b'ns1:', b''),
        (b' />', b'/>'),
        (b'<html>', b'<html xmlns="http://www.w3.org/1999/xhtml">'),
        (b'<rdf:RDF>', rdf_str),
        (b'xmlns:ns0="http://www.sbml.org/sbml/level2/version4"', b''),
        (b'xmlns:ns1="http://www.w3.org/1999/xhtml"', b''),
    ]
    for old, new in replacements:
        xml_str = xml_str.replace(old, new)
    with open(fname, 'wb') as fh:
        fh.write(xml_str)
@click.command()
@click.argument('directory', type=Path)
def main(directory: Path):
    """Run grounding on the directory for the COVID 19 Disease Maps repository."""
    # Only process the '*_stable.xml' snapshots anywhere under the directory.
    stable_xmls = list(directory.resolve().rglob('*_stable.xml'))
    it = tqdm(stable_xmls, desc='Grounding')
    for stable_xml in it:
        it.set_postfix(file=stable_xml.name)
        register_all_namespaces(stable_xml)
        et = ET.parse(stable_xml)
        et = add_groundings(et)
        # Serialize the modified tree back over the original file.
        dump(et, stable_xml)
if __name__ == '__main__':
    # Ground a dummy string first — presumably to trigger Gilda's resource
    # loading before the progress loop starts; confirm.
    gilda.ground('x')
    main()
|
steppi/gilda | benchmarks/bioid_f1_breakdown.py | <reponame>steppi/gilda
import os
import json
from bioid_evaluation import BioIDBenchmarker
HERE = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(HERE, 'data')

# Load curated equivalences if available, otherwise run without them.
try:
    with open(os.path.join(data_path, 'equivalences.json')) as f:
        equivalences = json.load(f)
except FileNotFoundError:
    equivalences = {}

benchmarker = BioIDBenchmarker(equivalences=equivalences)
benchmarker.ground_entities_with_gilda(context=True)
df = benchmarker.processed_data


def _top_grounding(groundings):
    """Return the top-ranked grounding curie from a list, or None."""
    return groundings[0][0] if groundings else None


def _hgnc_stats(frame, correct_col, top_col):
    """Compute (fp, tp, pos, precision, recall, F1) for HGNC groundings.

    ``correct_col`` is a boolean column marking whether the top grounding
    is correct; ``top_col`` holds the top grounding curie (or None).
    """
    is_hgnc = frame[top_col].apply(
        lambda x: x is not None and x.startswith('HGNC'))
    fp = frame[(~frame[correct_col]) & is_hgnc]
    tp = frame[frame[correct_col] & is_hgnc]
    # Positives: entries whose reference synonyms include any HGNC curie.
    pos = frame[frame.obj_synonyms.apply(
        lambda syns: any(y.startswith('HGNC') for y in syns))]
    pr = len(tp) / (len(tp) + len(fp))
    rc = len(tp) / len(pos)
    f1 = 2 / (1 / pr + 1 / rc)
    return fp, tp, pos, pr, rc, f1


# Scores without using context-based disambiguation.
df['top_grounding_no_context'] = df.groundings_no_context.apply(_top_grounding)
fp, tp, pos, pr, rc, f1 = _hgnc_stats(
    df, 'top_correct_no_context', 'top_grounding_no_context')

# Scores with context-based disambiguation (same computation, different
# columns; previously duplicated inline).
df['top_grounding'] = df.groundings.apply(_top_grounding)
fp_disamb, tp_disamb, pos_disamb, pr_disamb, rc_disamb, f1_disamb = \
    _hgnc_stats(df, 'top_correct', 'top_grounding')
|
steppi/gilda | scripts/generate_uniprot_chain_proonto_mappings.py | import re
import csv
import obonet
from collections import defaultdict
from gilda.term import Term
from gilda.grounder import Grounder, normalize
from biomappings.resources import append_prediction_tuples
# This is from the Reach bioresources file as a convenient
# source of pre-processed synonyms
UNIPROT = '/Users/ben/src/bioresources/kb/uniprot-proteins.tsv'
PROONTO = '/Users/ben/src/bioresources/kb/protein-ontology-fragments.tsv'
PROONTO_OBO = '/Users/ben/src/bioresources/scripts/pro_reasoned.obo'
def organism_filter(organism):
    """Return True for organisms whose protein synonyms should be kept."""
    return organism in {'Human', 'SARS-CoV-2'}
def get_uniprot_terms():
    """Load UniProt chain synonyms for relevant organisms as Gilda Terms.

    Only rows whose grounding contains a '#' (i.e. chain/fragment ids) and
    whose organism passes organism_filter are kept; duplicates are removed
    by the JSON representation of the term.
    """
    unique_terms = {}
    with open(UNIPROT, 'r') as fh:
        for row in csv.reader(fh, delimiter='\t'):
            if '#' not in row[1] or not organism_filter(row[2]):
                continue
            synonym = row[0]
            chain_id = row[1].split('#')[1]
            term = Term(normalize(synonym), synonym, 'UP',
                        chain_id, synonym, 'synonym', 'uniprot')
            unique_terms[str(term.to_json())] = term
    return list(unique_terms.values())
def pre_process_synonym(synonym):
    """Return variants of a synonym to try grounding, most specific first.

    Besides the synonym itself, a variant with a trailing parenthesized
    qualifier stripped (e.g. "X (fragment)" -> "X") is returned. The
    stripped variant is only added when it actually differs, so callers no
    longer ground the identical string twice.
    """
    variants = [synonym]
    stripped = re.sub(r'(.*) (\(.*)\)$', '\\1', synonym)
    if stripped != synonym:
        variants.append(stripped)
    return variants
def ground_proonto_terms(grounder):
    """Ground each Protein Ontology synonym, collecting matches per PR id."""
    matches_per_id = defaultdict(list)
    with open(PROONTO, 'r') as fh:
        for synonym, pro_id in csv.reader(fh, delimiter='\t'):
            for variant in pre_process_synonym(synonym):
                hits = grounder.ground(variant)
                if hits:
                    matches_per_id[pro_id].extend(hits)
    return matches_per_id
def dump_predictions(matches=None):
    """Dump PR -> UniProt chain lexical mappings in Biomappings format.

    Parameters
    ----------
    matches : dict, optional
        Mapping of Protein Ontology ids to lists of Gilda ScoredMatch
        objects. If not given, falls back to the module-level
        ``matches_per_id`` populated by the ``__main__`` block below (the
        previous implementation read that global unconditionally, which
        broke any non-script use of this function).
    """
    if matches is None:
        matches = matches_per_id
    # Row layout: source prefix, source identifier, source name, relation,
    # target prefix, target identifier, target name, type, confidence, source
    source_prefix = 'pr'
    target_prefix = 'uniprot.chain'
    relation = 'skos:exactMatch'
    source = 'https://github.com/indralab/gilda/blob/master/scripts/' \
        'generate_uniprot_chain_proonto_mappings.py'
    match_type = 'lexical'
    rows = []
    pro = obonet.read_obo(PROONTO_OBO)
    for pro_id, match_list in matches.items():
        # Use the top-scoring match as the mapping target.
        target_id = match_list[0].term.id
        target_name = match_list[0].term.entry_name
        source_name = pro.nodes[pro_id]['name']
        row = (source_prefix, pro_id, source_name, relation,
               target_prefix, target_id, target_name, match_type,
               0.8, source)
        rows.append(row)
    append_prediction_tuples(rows, deduplicate=True)
if __name__ == '__main__':
    # 1. Parse all the UniProt synonyms that are for human
    # protein fragments into Gilda Terms
    terms = get_uniprot_terms()
    # 2. Instantiate a grounder with these terms, indexed by their
    # normalized text as Gilda's Grounder expects
    terms_dict = defaultdict(list)
    for term in terms:
        terms_dict[term.norm_text].append(term)
    grounder = Grounder(terms_dict)
    # 3. Parse all the Protein Ontology synonyms and ground each of them, then
    # store the results
    matches_per_id = ground_proonto_terms(grounder)
dump_predictions() |
steppi/gilda | gilda/resources/__main__.py | from . import get_grounding_terms, get_gilda_models
if __name__ == '__main__':
    # Invoke the resource getters up front — presumably fetches/caches the
    # grounding terms and model files so later use is fast; confirm.
    get_grounding_terms()
    get_gilda_models()
|
steppi/gilda | setup.py | import re
from os import path
from setuptools import setup, find_packages
# Resolve paths relative to this setup.py file.
here = path.abspath(path.dirname(__file__))

# The long description comes straight from the README.
with open(path.join(here, 'README.md'), encoding='utf-8') as fh:
    long_description = fh.read()

# Parse the package version out of gilda/__init__.py without importing it.
with open(path.join(here, 'gilda', '__init__.py'), 'r') as fh:
    version_match = re.search(r"^__version__ = '(.+)'",
                              fh.read(), re.MULTILINE)
if version_match is None:
    raise ValueError('Could not get version from gilda/__init__.py')
gilda_version = version_match.group(1)
# Package metadata and install configuration for the gilda distribution.
setup(name='gilda',
      version=gilda_version,
      description=('Grounding for biomedical entities with contextual '
                   'disambiguation'),
      long_description=long_description,
      long_description_content_type='text/markdown',
      url='https://github.com/indralab/gilda',
      author='<NAME>, Harvard Medical School',
      author_email='<EMAIL>',
      classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7'
      ],
      packages=find_packages(),
      install_requires=['regex', 'adeft>=0.4.0', 'boto3', 'flask',
                        'flask-wtf', 'flask-bootstrap', 'flask-restx',
                        'obonet', 'pystow>=0.1.10'],
      extras_require={'test': ['nose', 'coverage'],
                      'terms': ['indra'],
                      'benchmarks': ['pandas>=1.0', 'requests',
                                     'tabulate', 'tqdm', 'click']},
      keywords=['nlp', 'biology'],
      include_package_data=True,
      # Expose the web app launcher as a console script.
      entry_points={
          'console_scripts': [
              'gilda = gilda.app:main',
          ],
      },
      )
|
steppi/gilda | benchmarks/medmentions.py | """This script grounds entries in the MedMentions data set available at
https://github.com/chanzuckerberg/MedMentions. This can serve both as a
benchmark and as a bsis for creating mappings between the namespaces
Gilda grounds to and UMLS, which is used for MedMentions groundings.
.. code-block:: bibtex
@article{Mohan2019,
archivePrefix = {arXiv},
arxivId = {1902.09476},
author = {<NAME> and <NAME>},
eprint = {1902.09476},
month = {feb},
title = {{MedMentions: A Large Biomedical Corpus Annotated with UMLS Concepts}},
url = {http://arxiv.org/abs/1902.09476},
year = {2019}
}
"""
import csv
import json
import click
import gilda
import pyobo
import pystow
from more_click import verbose_option
from pubtator_loader import from_gz
from tqdm import tqdm
from tqdm.contrib.logging import logging_redirect_tqdm
URL = "https://github.com/chanzuckerberg/MedMentions/raw/master/full/data/corpus_pubtator.txt.gz"
MODULE = pystow.module("gilda", "medmentions")
CORPUS_PATH = MODULE.join(name="corpus.json")
MATCHING_PATH = pystow.join(
"gilda", "medmentions", gilda.__version__, name="matching.tsv"
)
#: Subset of types from the Semantic Type Ontology used in annotation
TYPES = {
"T116": "Amino Acid, Peptide, or Protein",
"T123": "Biologically Active Substance",
"T047": "Disease or Syndrome",
"T101": "Patient or Disabled Group",
"T079": "Temporal Concept",
"T169": "Functional Concept",
"T033": "Finding",
"T081": "Quantitative Concept",
"T063": "Molecular Biology Research Technique",
"T052": "Activity",
"T062": "Research Activity",
"T032": "Organism Attribute",
"T098": "Population Group",
"T100": "Age Group",
"T073": "Manufactured Object",
"T093": "Health Care Related Organization",
"T026": "Cell Component",
"T028": "Gene or Genome",
"T007": "Bacterium",
"T045": "Genetic Function",
"T046": "Pathologic Function",
"T131": "Hazardous or Poisonous Substance",
"T043": "Cell Function",
"T025": "Cell",
"T067": "Phenomenon or Process",
"T069": "Environmental Effect of Humans",
"T167": "Substance",
"T037": "Injury or Poisoning",
"T001": "Organism",
"T031": "Body Substance",
"T080": "Qualitative Concept",
"T121": "Pharmacologic Substance",
"T196": "Element, Ion, or Isotope",
"T044": "Molecular Function",
"T126": "Enzyme",
"T077": "Conceptual Entity",
"T082": "Spatial Concept",
"T103": "Chemical",
"T078": "Idea or Concept",
"T039": "Physiologic Function",
"T109": "Organic Chemical",
"T122": "Biomedical or Dental Material",
"T074": "Medical Device",
"T058": "Health Care Activity",
"T070": "Natural Phenomenon or Process",
"T057": "Occupational Activity",
"T023": "Body Part, Organ, or Organ Component",
"T059": "Laboratory Procedure",
"T042": "Organ or Tissue Function",
"T060": "Diagnostic Procedure",
"T184": "Sign or Symptom",
"T024": "Tissue",
"T029": "Body Location or Region",
"T053": "Behavior",
"T061": "Therapeutic or Preventive Procedure",
"T083": "Geographic Area",
"T055": "Individual Behavior",
"T185": "Classification",
"UnknownType": "UnknownType",
"T054": "Social Behavior",
"T170": "Intellectual Product",
"T097": "Professional or Occupational Group",
"T041": "Mental Process",
"T095": "Self-help or Relief Organization",
"T099": "Family Group",
"T048": "Mental or Behavioral Dysfunction",
"T071": "Entity",
"T204": "Eukaryote",
"T168": "Food",
"T019": "Congenital Abnormality",
"T201": "Clinical Attribute",
"T040": "Organism Function",
"T191": "Neoplastic Process",
"T049": "Cell or Molecular Dysfunction",
"T114": "Nucleic Acid, Nucleoside, or Nucleotide",
"T002": "Plant",
"T090": "Occupation or Discipline",
"T096": "Group",
"T075": "Research Device",
"T034": "Laboratory or Test Result",
"T197": "Inorganic Chemical",
"T195": "Antibiotic",
"T091": "Biomedical Occupation or Discipline",
"T008": "Animal",
"T102": "Group Attribute",
"T012": "Bird",
"T011": "Amphibian",
"T200": "Clinical Drug",
"T125": "Hormone",
"T129": "Immunologic Factor",
"T127": "Vitamin",
"T056": "Daily or Recreational Activity",
"T087": "Amino Acid Sequence",
"T086": "Nucleotide Sequence",
"T005": "Virus",
"T065": "Educational Activity",
"T192": "Receptor",
"T022": "Body System",
"T015": "Mammal",
"T038": "Biologic Function",
"T130": "Indicator, Reagent, or Diagnostic Aid",
"T030": "Body Space or Junction",
"T017": "Anatomical Structure",
"T068": "Human-caused Phenomenon or Process",
"T016": "Human",
"T050": "Experimental Model of Disease",
"T089": "Regulation or Law",
"T085": "Molecular Sequence",
"T104": "Chemical Viewed Structurally",
"T064": "Governmental or Regulatory Activity",
"T190": "Anatomical Abnormality",
"T092": "Organization",
"T051": "Event",
"T004": "Fungus",
"T171": "Language",
"T014": "Reptile",
"T120": "Chemical Viewed Functionally",
"T020": "Acquired Abnormality",
"T013": "Fish",
"T194": "Archaeon",
"T072": "Physical Object",
"T018": "Embryonic Structure",
"T066": "Machine Activity",
"T010": "Vertebrate",
"T094": "Professional Society",
"T021": "Fully Formed Anatomical Structure",
"T203": "Drug Delivery Device",
}
def get_corpus():
    """Load the MedMentions corpus as a list of plain dictionaries.

    Each entry has keys ``id``, ``title_text``, ``abstract_text`` and
    ``entities``; each entity dict carries ``document_id``,
    ``start_index``, ``end_index``, ``text_segment``,
    ``semantic_type_id`` and ``entity_id``.
    """
    if not CORPUS_PATH.is_file():
        raw_path = MODULE.ensure(url=URL)
        parsed = from_gz(raw_path)
        # Serialize the loader's custom objects through JSON so subsequent
        # reads can work with plain dicts instead of that object structure.
        with CORPUS_PATH.open("w") as file:
            json.dump(parsed, file, indent=2, default=lambda o: o.__dict__)
    with CORPUS_PATH.open() as file:
        return json.load(file)
def iterate_corpus():
    """Yield (document_id, abstract, umls_id, text, start, end, types)
    for every annotation in the MedMentions corpus."""
    corpus = get_corpus()
    click.echo(f"MedMentions has {len(corpus)} entries")
    for document in tqdm(corpus, unit="document", desc="MedMentions"):
        abstract = document["abstract_text"]
        for entity in document["entities"]:
            yield (
                document["id"],
                abstract,
                entity["entity_id"],
                entity["text_segment"],
                entity["start_index"],
                entity["end_index"],
                set(entity["semantic_type_id"].split(",")),
            )
#: Column names of the TSV report written by ``main``.
HEADER = [
    "document",
    "start_idx",
    "end_idx",
    "text",
    "umls_id",
    "umls_name",
    "gilda_prefix",
    "gilda_identifier",
    "gilda_name",
    "gilda_score",
]
@click.command()
@verbose_option
def main():
    """Ground every MedMentions annotation with Gilda and dump a TSV report."""
    corpus = get_corpus()
    click.echo(f"There are {len(corpus)} entries")
    rows = []
    for document_id, abstract, umls_id, text, start, end, types in iterate_corpus():
        with logging_redirect_tqdm():
            matches = gilda.ground(text, context=abstract)
        # Look up the UMLS name once per annotation instead of once per match.
        umls_name = pyobo.get_name("umls", umls_id)
        rows.extend(
            (
                document_id,
                start,
                end,
                text,
                umls_id,
                umls_name,
                match.term.db,
                match.term.id,
                match.term.entry_name,
                match.score,
            )
            for match in matches
        )
    # newline="" is required by the csv module to avoid \r\r\n line endings
    # on Windows.
    with MATCHING_PATH.open("w", newline="") as file:
        writer = csv.writer(file, delimiter="\t")
        writer.writerow(HEADER)
        writer.writerows(rows)
# Entry point when run as a script; click handles CLI parsing.
if __name__ == "__main__":
    main()
|
steppi/gilda | scripts/generate_mesh_mappings.py | <filename>scripts/generate_mesh_mappings.py
import os
from collections import defaultdict
from gilda.generate_terms import *
from indra.databases import mesh_client
resources = os.path.join(os.path.dirname(__file__), os.path.pardir,
'gilda', 'resources')
def is_protein(mesh_id):
    """True if the MeSH entry is a protein or an enzyme."""
    return any(pred(mesh_id) for pred in
               (mesh_client.is_protein, mesh_client.is_enzyme))
def is_chemical(mesh_id):
    """True if the MeSH entry refers to a molecular entity."""
    return mesh_client.is_molecular(mesh_id)
def render_row(me, te):
    """Render a single MeSH-to-other-namespace mapping as one TSV line."""
    fields = (me.db, me.id, me.entry_name, te.db, te.id, te.entry_name)
    return '\t'.join(fields)
def get_nonambiguous(maps):
    """Pick a single unambiguous mapping from candidate mappings.

    Returns ``(keep, ambigs)``: ``keep`` is the chosen (mesh_term, term)
    pair (or None), ``ambigs`` is the list of mappings left unresolved.
    """
    candidates = list(maps)
    # A single candidate is unambiguous by definition.
    if len(candidates) <= 1:
        return candidates[0], []
    # Otherwise an exact case-insensitive name match is the only accepted
    # way to disambiguate, and only if exactly one such match exists.
    name_matches = [(me, te) for me, te in candidates
                    if me.entry_name.lower() == te.entry_name.lower()]
    if len(name_matches) == 1:
        return name_matches[0], []
    return None, candidates
def resolve_duplicates(mappings):
    """Resolve mappings so each MeSH id and each target id is mapped once.

    Returns ``(keep, ambigs)`` where ``keep`` is a list of unambiguous
    (mesh_term, other_term) pairs and ``ambigs`` collects everything that
    could not be disambiguated.
    """
    all_ambigs = []

    def _pick(groups):
        # Keep one mapping per group, accumulating the ambiguous leftovers.
        picked = []
        for candidates in groups:
            keep, ambigs = get_nonambiguous(candidates)
            if keep:
                picked.append(keep)
            all_ambigs.extend(ambigs)
        return picked

    # First we deal with mappings from MESH (the forward direction).
    forward = _pick(maps.values() for maps in mappings.values())
    # Next we deal with mappings to MESH: regroup by the target id and
    # disambiguate the reverse direction.
    reverse_mappings = defaultdict(list)
    for mesh_term, other_term in forward:
        reverse_mappings[(other_term.db, other_term.id)].append(
            (mesh_term, other_term))
    keep_mappings = _pick(reverse_mappings.values())
    return keep_mappings, all_ambigs
def dump_mappings(mappings, fname):
    """Write (mesh_term, other_term) pairs as TSV, sorted by MeSH id."""
    with open(fname, 'w') as fh:
        for mesh_term, other_term in sorted(mappings, key=lambda x: x[0].id):
            fields = (mesh_term.db, mesh_term.id, mesh_term.entry_name,
                      other_term.db, other_term.id, other_term.entry_name)
            fh.write('\t'.join(fields) + '\n')
def get_ambigs_by_db(ambigs):
    """Group a list of terms into a plain dict keyed by their database."""
    grouped = {}
    for term in ambigs:
        grouped.setdefault(term.db, []).append(term)
    return grouped
def get_mesh_mappings(ambigs):
    """Map each MeSH term to one term from another namespace, by priority.

    ``ambigs`` maps a shared string to terms (exactly one of them MeSH).
    For each string, namespaces are tried in a fixed priority order, and a
    mapping is added only if that namespace has exactly one candidate and
    the MeSH id satisfies the namespace-specific constraint.
    """
    mappings_by_mesh_id = defaultdict(dict)
    for text, ambig_terms in ambigs.items():
        ambigs_by_db = get_ambigs_by_db(ambig_terms)
        print('Considering %s' % text)
        for term in ambig_terms:
            print('%s:%s %s' % (term.db, term.id, term.entry_name))
        # Priority order of target namespaces with a constraint on the kind
        # of MeSH entry (protein-like, chemical-like, or unconstrained).
        order = [('FPLX', is_protein),
                 ('HGNC', is_protein),
                 ('CHEBI', is_chemical),
                 ('GO', lambda x: True),
                 ('DOID', lambda x: True),
                 ('HP', lambda x: True),
                 ('EFO', lambda x: True)]
        me = ambigs_by_db['MESH'][0]
        for ns, mesh_constraint in order:
            if len(ambigs_by_db.get(ns, [])) == 1 and mesh_constraint(me.id):
                mappings_by_mesh_id[me.id][(ambigs_by_db[ns][0].db,
                                            ambigs_by_db[ns][0].id)] = \
                    (me, ambigs_by_db[ns][0])
                print('Adding mapping for %s' % ns)
                # Stop at the first (highest-priority) namespace that maps.
                break
        print('--------------')
    return mappings_by_mesh_id
def find_ambiguities(terms, match_attr='text'):
    """Find strings shared by exactly one MeSH term and other terms.

    Groups terms by their (raw or normalized) text and keeps only groups
    with at least two members of which exactly one comes from MeSH.
    Returns a dict mapping the shared string to its list of terms.
    """
    def key(term):
        return term.text if match_attr == 'text' else term.norm_text

    by_string = defaultdict(list)
    for term in terms:
        # We consider it an ambiguity if the same text entry appears
        # multiple times
        by_string[key(term)].append(term)
    ambigs = {
        string: entries for string, entries in by_string.items()
        if len(entries) >= 2
        and sum(entry.db == 'MESH' for entry in entries) == 1
    }
    print('Found a total of %d relevant ambiguities' % len(ambigs))
    return ambigs
def get_terms():
    """Collect grounding terms from all source namespaces, deduplicated.

    ignore_mappings=True regenerates terms without applying previously
    generated mappings, since this script is what produces them.
    """
    terms = generate_mesh_terms(ignore_mappings=True) + \
        generate_go_terms() + \
        generate_hgnc_terms() + \
        generate_famplex_terms(ignore_mappings=True) + \
        generate_uniprot_terms(download=False) + \
        generate_chebi_terms() + \
        generate_efo_terms(ignore_mappings=True) + \
        generate_hp_terms(ignore_mappings=True) + \
        generate_doid_terms(ignore_mappings=True)
    terms = filter_out_duplicates(terms)
    return terms
def manual_go_mappings(terms):
    """Build manually curated MESH -> GO mappings (from FamPlex and INDRA).

    Parameters
    ----------
    terms : list
        All generated terms; used to look up the Term objects for each
        curated id pair.

    Returns
    -------
    defaultdict
        Keyed by MeSH id; each value maps ('GO', go_id) to the
        (mesh_term, go_term) tuple, mirroring get_mesh_mappings' output.
    """
    terms_by_grounding = defaultdict(list)
    for term in terms:
        terms_by_grounding[(term.db, term.id)].append(term)
    # Migrated from FamPlex and INDRA. (Renamed from `map`, which shadowed
    # the builtin.)
    curated_pairs = [
        ('D002465', 'GO:0048870'),
        ('D002914', 'GO:0042627'),
        ('D012374', 'GO:0120200'),
        ('D014158', 'GO:0006351'),
        ('D014176', 'GO:0006412'),
        ('D018919', 'GO:0001525'),
        ('D048708', 'GO:0016049'),
        ('D058750', 'GO:0001837'),
        ('D059767', 'GO:0000725')
    ]
    mappings_by_mesh_id = defaultdict(dict)
    for mesh_id, go_id in curated_pairs:
        mesh_term = terms_by_grounding[('MESH', mesh_id)][0]
        go_term = terms_by_grounding[('GO', go_id)][0]
        mappings_by_mesh_id[mesh_id][('GO', go_id)] = (mesh_term, go_term)
    return mappings_by_mesh_id
if __name__ == '__main__':
    terms = get_terms()
    # General ambiguities on exact surface text
    ambigs = find_ambiguities(terms, match_attr='text')
    mappings = get_mesh_mappings(ambigs)
    # Ambiguities that involve long strings but we allow normalized matches
    ambigs2 = find_ambiguities(terms, match_attr='norm_text')
    ambigs2 = {k: v for k, v in ambigs2.items() if len(k) > 6}
    mappings2 = get_mesh_mappings(ambigs2)
    # Merge in normalized-text mappings without overriding exact ones
    for k, v in mappings2.items():
        if k not in mappings:
            mappings[k] = v
    # Finally add the manually curated GO mappings, again non-overriding
    mappings3 = manual_go_mappings(terms)
    for k, v in mappings3.items():
        if k not in mappings:
            mappings[k] = v
    mappings, mapping_ambigs = resolve_duplicates(mappings)
    dump_mappings(mappings, os.path.join(resources, 'mesh_mappings.tsv'))
    dump_mappings(mapping_ambigs,
                  os.path.join(resources, 'mesh_ambig_mappings.tsv'))
|
mgleeming/eicExtractor | eicExtractor.py | <reponame>mgleeming/eicExtractor<gh_stars>0
import os, sys, pymzml, argparse
import numpy as np
# Default extraction window half-width in m/z.
DEFAULT_EXTRACTION_WIDTH = 0.01

parser = argparse.ArgumentParser(
    description='Extract chromatographic data from mzML files'
)
parser.add_argument('mzmlFile',
                    type=str,
                    # The previous help text described a repeatable
                    # '--mzmlFile' option that does not exist; this is a
                    # single positional argument.
                    help='File path of the mzML data file to process.'
                    )
parser.add_argument('eicTarget',
                    type=float,
                    help='Target ion for chromatogram plotting.'
                    )
parser.add_argument('--eicWidth',
                    default=DEFAULT_EXTRACTION_WIDTH,
                    type=float,
                    help='Width (in m/z) used to produce EIC plots'
                    )
def getEICData(options):
    """Extract an EIC trace for options.eicTarget from options.mzmlFile.

    Writes a tab-separated retention-time/intensity file named
    'EIC_<target>_<input name>.txt' next to the input file. Returns None.
    """
    targetLL = options.eicTarget - options.eicWidth
    targetHL = options.eicTarget + options.eicWidth
    print('Processing %s' %options.mzmlFile)
    print('EIC target: %s' %options.eicTarget)
    print('EIC target LL: %s' %targetLL)
    print('EIC target HL: %s' %targetHL)
    outFile = os.path.join(
        os.path.dirname(options.mzmlFile),
        'EIC_%s_%s.txt' %(
            options.eicTarget,
            os.path.basename(options.mzmlFile)
        )
    )
    spectra = pymzml.run.Reader(options.mzmlFile)
    # Use a context manager so the output file is closed even if parsing
    # raises part-way through (previously the handle could leak).
    with open(outFile, 'wt') as ofx:
        ofx.write('#RT\tIntensity\n')
        for s in spectra:
            # Only MS1 spectra contribute to the EIC.
            if s.ms_level != 1: continue
            time = s.scan_time_in_minutes()
            mzs = s.mz
            ints = s.i
            # Sum the intensities of all peaks inside the extraction window.
            mask = np.where( (mzs > targetLL) & (mzs < targetHL) )
            intsubset = ints[mask]
            ofx.write('%s\t%s\n'%(time, np.sum(intsubset)))
    return
def main(options):
    """Run the EIC extraction for the parsed command-line options."""
    getEICData(options)
if __name__ == '__main__':
    options = parser.parse_args()
    main(options)
|
bunto1/stats | scripts/generate_boxscore.py | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Generating a boxscore view/chart/table from a single game."""
# Here comes your imports
import sys
import logging as log
import pandas as pd
# Here comes your (few) global variables
# Here comes your class definitions
# Here comes your function definitions
def get_player_and_goalie_count(players):
    """Count field players and goalies per row of on-ice number cells.

    Player cells look like '12' or '?', goalie cells like 'g12' or 'g?'.
    Returns a DataFrame indexed like ``players`` with integer columns
    'players' and 'goalies'.
    """
    log.debug(players)
    player_pattern = r'^(\d+|\?)$'
    goalie_pattern = r'^(g\d+|g\?)$'

    def count_matches(pattern):
        # Count, per row, the cells whose full content matches the pattern.
        # (Named helper instead of assigned lambdas, per PEP 8.)
        return players.apply(
            lambda row: row.str.contains(pattern, regex=True, na=False),
            axis=1).sum(axis=1)

    out = pd.DataFrame([], index=players.index)
    out['players'] = count_matches(player_pattern)
    out['goalies'] = count_matches(goalie_pattern)
    return out
def get_strength(goals):
    """Get the strength (even / power play / penalty kill) from goals data.

    NOTE(review): currently only logs the per-side player/goalie counts and
    returns None — looks unfinished; confirm the intended return value.
    """
    team = goals['team']
    log.debug(team)
    # Home side on-ice columns hm_1..hm_6
    col = ['hm_' + str(i) for i in range(1, 7)]
    pl_home = goals[col]
    log.debug(get_player_and_goalie_count(pl_home))
    # Away side on-ice columns aw_1..aw_6
    col = ['aw_' + str(i) for i in range(1, 7)]
    pl_away = goals[col]
    log.debug(get_player_and_goalie_count(pl_away))
def main():
    """Launcher: load pickled game data and derive goal strength info."""
    # log.basicConfig(level=log.DEBUG)
    log.debug('Number of arguments: %d arguments.', len(sys.argv))
    log.debug('Argument List: %s', str(sys.argv))
    datadir = 'data/'
    filename = 'x'  # NOTE(review): placeholder file name — confirm intended
    datafile = datadir + filename + '.pkl.xz'
    log.debug(datafile)
    data = pd.read_pickle(datafile)
    log.debug(data.info())
    # Keep only events whose shot outcome was a goal.
    goals = data[data['sh_outc'] == 'G']
    print(goals)
    get_strength(goals)
if __name__ == "__main__":
    main()
|
bunto1/stats | scripts/parse_game_type_x.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Parsing of a csv game tracking sheet of type 'X', saving data in consice and relevant manner."""
# Here comes your imports
import sys
import logging as log
import pandas as pd
# Here comes your (few) global variables
# Here comes your class definitions
# Here comes your function definitions
def parse_pre_shot_situation(data, out):
    """Parse the situation leading to the shot into ``out['sh_situ']``.

    Categories: cycle / free-hit / develop / counter / turnover /
    rebound / penalty / others. Exactly one label column should be
    marked per event; violations are logged.
    """
    labels_to_categories = {
        'Festsetzen': 'CYC', 'Freischlag': 'FHT', 'Auslösung': 'DVL',
        'Konter': 'CNT', 'Ballgewinn': 'TNV', 'Abpraller': 'RBD',
        'Penalty': 'PNT', 'Sonstige': 'OTH',
    }
    categories = list(labels_to_categories.values())
    situations = data[list(labels_to_categories)]
    situations.columns = categories
    marked = situations.notna().sum(axis=1)
    if (marked != 1).any():
        log.warning('no pre shot situation:\n%s', situations[marked < 1])
        log.warning('multiple pre shot situations:\n%s', situations[marked > 1])
    coded = pd.Categorical([''] * len(situations.index), categories=categories)
    for category, column in situations.items():
        coded[column.notna()] = category
    log.debug(pd.Series(coded))
    log.debug(pd.Series(coded).value_counts())
    out['sh_situ'] = pd.Series(coded)
def parse_shot_type(data, out):
    """Parse the type of the shot into ``out['sh_type']``.

    Categories: wrist / chip / slap / backhand / one-timer / volley /
    tip / in-tight. Exactly one label column should be marked per event;
    violations are logged.
    """
    labels_to_categories = {
        'Gezogen': 'WRS', 'Chip': 'CHP', 'Slapshot': 'SLP',
        'Backhand': 'BKH', 'Direkt': 'ONT', 'Volley': 'VOL',
        'Ablenker': 'TIP', 'InTight': 'INT',
    }
    categories = list(labels_to_categories.values())
    shots = data[list(labels_to_categories)]
    shots.columns = categories
    marked = shots.notna().sum(axis=1)
    if (marked != 1).any():
        log.warning('no shot type:\n%s', shots[marked < 1])
        log.warning('multiple shot types:\n%s', shots[marked > 1])
    coded = pd.Categorical([''] * len(shots.index), categories=categories)
    for category, column in shots.items():
        coded[column.notna()] = category
    log.debug(pd.Series(coded))
    log.debug(pd.Series(coded).value_counts())
    out['sh_type'] = pd.Series(coded)
def parse_shot_result(data, out):
    """Parse the shot result (blocked / missed / on-goal / goal) into
    ``out['sh_outc']``.

    Missing results are logged as warnings; multiple marks per event are
    expected (e.g. a goal is also on goal) and only logged at debug level.
    """
    result_categories = ['BL', 'MI', 'SOG', 'G']
    results = data[result_categories]
    log.debug(results.info())
    marked = results.notna().sum(axis=1)
    if (marked < 1).any():
        log.warning('no shot result:\n%s', results[marked < 1])
    if (marked > 1).any():
        log.debug('multiple shot results:\n%s', results[marked > 1])
    coded = pd.Categorical([''] * len(results.index),
                           categories=result_categories)
    # Later columns win, so 'G' overrides 'SOG' etc. for multiple marks.
    for category, column in results.items():
        coded[column.notna()] = category
    log.debug(pd.Series(coded))
    log.debug(pd.Series(coded).value_counts())
    out['sh_outc'] = pd.Series(coded)
def parse_involved_players_for(data, out):
    """Parse the involved (on-field) players for into out['hm_1'..'hm_6'].

    The tracking sheet has one column per jersey number ('12', 'g1', ...);
    a non-empty cell means that player was on the field for the event.
    """
    prefix = 'hm_'
    players_goalies = data.filter(regex=("^g?[0-9]+$"))
    numbers = pd.Series(list(players_goalies))
    col = [prefix + str(i) for i in range(1, 7)]
    players = pd.DataFrame('', index=players_goalies.index, columns=col)
    # enumerate() provides the positional row number; previously the index
    # *label* from iterrows was passed to .iloc, which misaligns (or raises)
    # whenever the frame does not have a default RangeIndex.
    for pos, (index, event) in enumerate(players_goalies.iterrows()):
        players_on = numbers[event.notna().values]
        player_count = len(players_on)
        if len(col) >= player_count:
            players.iloc[pos, 0:player_count] = players_on.values
        else:
            log.warning('too many players, index : %d', index)
        log.debug(players_on)
    log.debug(players)
    for label, content in players.items():
        out[label] = content
def parse_involved_players_against(data, out):
    """Parse the on-field player counts against into out['aw_1'..'aw_6'].

    The sheet only records how many opposing players and goalies were on
    the field, so placeholders ('?' / 'g?') are emitted instead of numbers.
    """
    prefix = 'aw_'
    suffix = '_against'
    players_goalies = data[['players' + suffix, 'goalie' + suffix]]
    default_number = '?'
    col = [prefix + str(i) for i in range(1, 7)]
    players = pd.DataFrame('', index=players_goalies.index, columns=col)
    # enumerate() provides the positional row number; previously the index
    # *label* from iterrows was passed to .iloc, which misaligns (or raises)
    # whenever the frame does not have a default RangeIndex.
    for pos, (index, event) in enumerate(players_goalies.iterrows()):
        players_on = \
            ([default_number] * event.loc['players' + suffix]) + \
            (['g' + default_number] * event.loc['goalie' + suffix])
        player_count = len(players_on)
        if len(col) >= player_count:
            players.iloc[pos, 0:player_count] = players_on
        else:
            log.warning('too many players, index : %d', index)
        log.debug(players_on)
    log.debug(players)
    for label, content in players.items():
        out[label] = content
def parse_acting_players(data, out):
    """parse the acting players (shot, assist, block) from the columns with player numbers

    Scans every column whose name is a plain or goalie jersey number
    (e.g. ``12`` / ``g30``) and records which number performed the
    shot ('S'), assist ('A') or block ('B') marked in that column.
    Results are appended to `out` as the columns 'shot', 'assist', 'block'.
    """
    players_goalies = data.filter(regex=("^g?[0-9]+$"))
    actions = pd.DataFrame('', index=players_goalies.index, columns=['shot', 'assist', 'block'])
    for col in players_goalies:
        player = players_goalies[col].astype(str)
        nbr = col.replace('g', '')
        # BUG FIX: the original used chained indexing
        # (actions['shot'][mask] = nbr), which raises SettingWithCopyWarning
        # and silently stops writing under pandas copy-on-write; .loc writes
        # into the frame directly.
        actions.loc[player.str.match('S'), 'shot'] = nbr
        actions.loc[player.str.match('A'), 'assist'] = nbr
        actions.loc[player.str.match('B'), 'block'] = nbr
    log.debug(actions)
    log.debug(actions.info())
    for label, content in actions.items():
        out[label] = content
def parse_team(data, out):
    """parse the team (home/away) from two columns

    Column 'x' marks home events and column 'y' away events; exactly one
    of the two is expected to be filled per row.  The result is written
    to out['team'] as a Categorical with categories ['home', 'away'].
    """
    cols = {'home': 'x', 'away': 'y'}
    home_away = data[list(cols.values())]
    log.debug(home_away.info())
    filled = home_away.notna().sum(axis=1)
    bad = filled != 1
    if bad.any():
        log.warning('bad team data:\n%s', home_away[bad])
    team = pd.Categorical([''] * len(home_away.index), categories=list(cols))
    for side, column in cols.items():
        team[home_away[column].notna()] = side
    log.debug(team)
    out['team'] = team
def parse_period(data, out):
    """parse the period int-string into int (OT => 4)

    Writes the numeric period to out['per'] without touching `data`.
    """
    # BUG FIX: the original used replace(..., inplace=True) on the column
    # view, mutating the caller's DataFrame as a side effect (and tripping
    # chained-assignment warnings).  Work on a derived Series instead.
    period = data['period'].replace('OT', '4')
    log.debug(period.astype(int))
    out['per'] = period.astype(int)
def parse_time(data, out):
    """parse video time float-string into minutes and seconds

    'time_vid' like '12.34' becomes out['vd_m']='12' and out['vd_s']='34';
    'time_game_m' keeps only its integer part as out['gm_m'].
    """
    time_as_str = data['time_vid'].astype(str)
    vd_m_s = time_as_str.str.split('.', expand=True)
    # BUG FIX: set_axis(..., inplace=True) was removed in pandas 2.0 and
    # raises TypeError there; assign the column labels directly.
    vd_m_s.columns = ['vd_m', 'vd_s']
    log.debug(vd_m_s)
    for label, content in vd_m_s.items():
        out[label] = content
    time_as_str = data['time_game_m'].astype(str)
    gm_m = time_as_str.str.split('.', expand=True).iloc[:, 0]
    log.debug(gm_m)
    out['gm_m'] = gm_m
def main():
    """Launcher.

    Reads data/<filename>.csv, runs every parse_* step to derive event
    columns, and stores the result as an xz-compressed pickle next to
    the source csv.
    """
    # log.basicConfig(level=log.DEBUG)
    log.debug('Number of arguments: %d arguments.', len(sys.argv))
    log.debug('Argument List: %s', str(sys.argv))
    datadir = 'data/'
    filename = 'x'
    datafile = datadir + filename + '.csv'
    log.debug(datafile)
    # the event sheets use single quotes as the CSV quote character
    data = pd.read_csv(datafile, quotechar="'")
    log.debug(data)
    # each parse_* step appends its derived columns to `parsed`
    parsed = pd.DataFrame([], index=data.index)
    parse_time(data, parsed)
    parse_period(data, parsed)
    parse_team(data, parsed)
    parse_acting_players(data, parsed)
    parse_involved_players_for(data, parsed)
    parse_involved_players_against(data, parsed)
    parse_shot_result(data, parsed)
    parse_shot_type(data, parsed)
    parse_pre_shot_situation(data, parsed)
    log.debug(parsed)
    log.debug(parsed.info())
    parsed.to_pickle(datadir + filename + '.pkl.xz')
if __name__ == "__main__":
    main()
|
bunto1/stats | scripts/boxscore.py | import pandas as pd
import matplotlib.pyplot as plt
# see: https://towardsdatascience.com/simple-little-tables-with-matplotlib-9780ef5d0bc4
# Demo data: player master table plus per-team rosters keyed by player id.
players = pd.DataFrame([
    ("McDavid", "Connor"), \
    ("MacKinnon", "Nathan"), \
    ("Matthews", "Auston") \
    ], columns=["name", "prename"])
col = ["id", "number"]
roster_home = pd.DataFrame([
    (0, 97),
    (2, 34)
    ], columns=col)
roster_away = pd.DataFrame([
    (1, 29),
    (0, 97)
    ], columns=col)
# Goal events: time, scoring team, scorer and assistant jersey numbers.
goals = pd.DataFrame([
    ("00:13", "home", 97, 34),
    ("19:58", "home", 34, 97),
    ("31:42", "away", 29, 97),
    ("42:13", "home", 97, 34),
    ("", "", 0, 0)
    ], columns=["time", "team", "goal", "assi"])
# Resolve the first goal's scorer number to a player row via the roster.
evt = goals.iloc[0]
nbr = evt.goal
pid = roster_home[roster_home.number==nbr].iloc[0].id
nms = players.iloc[pid]
# Static boxscore cell contents (placeholder strings for now).
cell_text = [
    ['# 4 a. player (#10 b. player)', '1-0', ''],
    ['#17 c. player (# 9 d. player)', '2-0', ''],
    ['', '2-1', '# 6 e. player (#26 f. player)'],
    ['#10 b. player (# 4 a. player)', '3-1', ''],
    ['', '', '']
]
columns = ('away team', '@', 'home team')
times = ('00:25', '19:58', '25:42', '42:25', '')
# Render the boxscore as a matplotlib table, goal times as row labels.
the_table = plt.table(cellText=cell_text,
                      rowLabels=goals.time,
                      #rowLabels=times,
                      colLabels=columns,
                      loc='center')
#the_table.auto_set_font_size(False)
#the_table.set_fontsize(12)
the_table.scale(1, 1.5)
ax = plt.gca()
# hide the axes so only the table appears in the saved figure
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.box(on=None)
fig = plt.gcf()
plt.savefig('output/iBoxscore.png', bbox_inches='tight', dpi=150)
|
Ecg-Ai-com/ecgai-overload | tests/test_metaclass.py | <filename>tests/test_metaclass.py
import pytest
from src.ecgai_overload.metaclass import NoMatchingOverload
from tests.mock_class import (
RootClass,
int_overload,
str_overload,
int_str_overload,
int_int_overload,
str_str_overload,
SubClass,
flt_flt_overload,
)
class TestMetaClass:
    """Exercises OverloadMeta dispatch on the RootClass/SubClass mocks.

    Each overload returns a unique marker string, so asserting on the
    returned value identifies which implementation was dispatched.
    """

    def test_int_overload_with_named_and_default_parameters(self):
        assert RootClass().run() == int_overload

    def test_str_overload_with_named_parameters(self):
        assert RootClass().run(str1="number two") == str_overload

    def test_int_and_str_overload_with_named_parameters(self):
        assert RootClass().run(num1=2, str1="number two") == int_str_overload

    def test_int_and_int_overload_with_named_parameters(self):
        assert RootClass().run(num1=2, num2=6) == int_int_overload

    def test_str_and_str_overload_with_named_parameters(self):
        assert RootClass().run(str1="number two", str2="number three") == str_str_overload

    def test_int_overload_without_named_parameters(self):
        assert RootClass().run(2) == int_overload

    def test_str_overload_without_named_parameters(self):
        assert RootClass().run("number two") == str_overload

    def test_int_and_str_overload_without_named_parameters(self):
        assert RootClass().run(2, "number two") == int_str_overload

    def test_int_and_int_overload_without_named_parameters(self):
        assert RootClass().run(2, 6) == int_int_overload

    def test_str_and_str_overload_without_named_parameters(self):
        assert RootClass().run("number two", "number three") == str_str_overload

    def test_flt_and_flt_overload_from_subclass_with_named_parameters(self):
        assert SubClass().run(flt1=1.0, flt2=2.42343) == flt_flt_overload

    def test_int_and_int_overload_from_subclass_with_named_parameters(self):
        assert SubClass().run(num1=2, num2=6) == int_int_overload

    def test_flt_and_flt_overload_from_subclass_without_named_parameters(self):
        assert SubClass().run(1.0, 2.42343) == flt_flt_overload

    def test_int_and_int_overload_from_subclass_without_named_parameters(self):
        assert SubClass().run(2, 6) == int_int_overload

    def test_no_overload_function_found_throw_exception(self):
        # 13 ints followed by 5 strings matches no registered overload
        bogus_args = [1] * 13 + ["test", "method", "does", "not", "exist"]
        with pytest.raises(NoMatchingOverload):
            # noinspection PyArgumentList
            SubClass().run(*bogus_args)
|
Ecg-Ai-com/ecgai-overload | src/example/main.py | # from src.ecgai_overload.metaclass import overload, OverloadDict, OverloadMeta
#
#
# def overload_dict_usage():
# print("OVERLOAD DICT USAGE")
# d = OverloadDict()
#
# @overload
# def f(self):
# pass
#
# d["a"] = 1
# d["a"] = 2
# d["b"] = 3
# d["f"] = f
# d["f"] = f
# print(d)
#
#
# class A(metaclass=OverloadMeta):
# @overload
# def f(self, x: int):
# print('A.f int overload', self, x)
#
# @overload
# def f(self, x: str):
# print('A.f str overload', self, x)
#
# @overload
# def f(self, x, y):
# print('A.f two arg overload', self, x, y)
#
#
# class B(A):
# def normal_method(self):
# print('B.f normal method')
#
# @overload
# def f(self, x, y, z):
# print('B.f three arg overload', self, x, y, z)
#
# # works with inheritance too!
#
#
# class C(B):
# @overload
# def f(self, x, y, z, t):
# print('C.f four arg overload', self, x, y, z, t)
#
#
# def overloaded_class_example():
# print("OVERLOADED CLASS EXAMPLE")
#
# a = A()
# print(f'{a=}')
# print(f'{type(a)=}')
# print(f'{type(A)=}')
# print(f'{A.f=}')
#
# a.f(0)
# a.f("hello")
# # a.f(None) # Error, no matching overload
# a.f(1, True)
# print(f'{A.f=}')
# print(f'{a.f=}')
#
# b = B()
# print(f'{b=}')
# print(f'{type(b)=}')
# print(f'{type(B)=}')
# print(f'{B.f=}')
# b.f(0)
# b.f("hello")
# b.f(1, True)
# b.f(1, True, "hello")
# # b.f(None) # no matching overload
# b.normal_method()
#
# c = C()
# c.f(1)
# c.f(1, 2, 3)
# c.f(1, 2, 3, 4)
#
#
# def run():
# overload_dict_usage()
# overloaded_class_example()
#
#
# if __name__ == '__main__':
# run()
|
Ecg-Ai-com/ecgai-overload | tests/mock_class.py | from src.ecgai_overload.metaclass import OverloadMeta, overload
# Marker strings returned by the mock overloads so tests can identify
# which implementation OverloadMeta dispatched to.
int_overload = 'int overload'
str_overload = 'str overload'
int_str_overload = 'int, str overload'
int_int_overload = 'int, int overload'
str_str_overload = 'str, str overload'
flt_flt_overload = 'float, float overload from sub class'
# noinspection PyUnusedLocal
class RootClass(metaclass=OverloadMeta):
    """Mock class whose `run` is overloaded on parameter types.

    Each overload returns a distinct marker string so tests can check
    which implementation OverloadMeta dispatched to.  The repeated
    `run` definitions are collected by the metaclass, not shadowed.
    """
    @overload
    def run(self, num1: int = 2):
        return int_overload
    @overload
    def run(self, str1: str):
        return str_overload
    @overload
    def run(self, num1: int, str1: str):
        return int_str_overload
    @overload
    def run(self, num1: int, num2: int):
        return int_int_overload
    @overload
    def run(self, str1: str, str2: str):
        return str_str_overload
    @overload
    def run(self, str1: str, str2: str, num1: int = 7):
        # NOTE(review): same required (str, str) shape as the overload
        # above — relies on OverloadMeta's tie-breaking; confirm intended.
        return str_str_overload
class SubClass(RootClass):
    """Subclass adding a float/float overload; inherits all RootClass overloads."""
    @overload
    def run(self, flt1: float, flt2: float):
        return flt_flt_overload
|
rafaank/rkpylib | rkpylib/rksocket.py | <reponame>rafaank/rkpylib<filename>rkpylib/rksocket.py
import socket
import sys
from multiprocessing import Process
""" Yet to be developed """
def process_start(s_sock):
    """Handle one accepted connection in a child process.

    Reads (and discards) up to 32 bytes of request data, answers with a
    minimal HTTP 200 response, then exits the child process.
    """
    raw_request = s_sock.recv(32)  # drain the request; content is ignored
    # BUG FIX: `ok_message` was never defined anywhere in this module, so
    # every connection previously died with a NameError.
    ok_message = b'HTTP/1.0 200 OK\r\n\r\n'
    s_sock.send(ok_message)
    s_sock.close()
    sys.exit(0)  # kill the child process
def create_socket_connection(ipaddr, port):
    """Accept TCP connections forever, handing each one to a child process.

    Blocks until interrupted; the listening socket is always closed on
    the way out.  Requires `sys` and `multiprocessing.Process` (fixed:
    both were referenced but never imported).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # allow quick restarts without 'address already in use'
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((ipaddr, port))
    s.listen(1)
    try:
        while True:
            try:
                s_sock, s_addr = s.accept()
                p = Process(target=process_start, args=(s_sock,))
                p.start()
            except socket.error:
                # stop the client disconnect from killing us
                print('Got a socket error')
    except Exception as e:
        print(f'An exception occured.. {e}')
        sys.exit(1)
    finally:
        s.close()
create_socket_connection('127.0.0.1', 8383) |
rafaank/rkpylib | rkpylib/__init__.py | <gh_stars>0
# Public submodules exported by `from rkpylib import *`.
__all__ = ['rkutils', 'rkdatasource', 'rkthread','rkhttp']
|
rafaank/rkpylib | rkpylib/backups/rkhttp_globals.py | <reponame>rafaank/rkpylib
from .rkdatasource import RKDataSource
from .rkutils import RKDict
from threading import Lock
def __init_globals__(globals):
    """Populate the shared RKHttpGlobals registry at server start-up.

    Registers a counter, a fixed pool of five MongoDB datasources with
    one Lock per datasource, the pool checkout helper `dspool_func`,
    and a total-request counter.
    """
    globals.register('counter', 0)
    dspool = list()
    dspool_lock = list()
    for i in range(5):
        # NOTE(review): hard-coded local MongoDB connection — presumably
        # meant to come from configuration; confirm.
        ds = RKDataSource(server='127.0.0.1', port=27017, database='test')
        lck = Lock()
        dspool.append(ds)
        dspool_lock.append(lck)
    globals.register('dspool', dspool)
    globals.register('dspool_lock', dspool_lock)
    globals.register('dspool_func', dspool_func)
    globals.register('total_requests', 0)
def dspool_func(pool, pool_lock):
    """Check out the first datasource whose lock can be acquired.

    Returns a dict with the (now held) 'lock' and its 'ds', or None when
    every pool entry is busy.  The caller must release the lock when done.
    """
    for position, candidate in enumerate(pool):
        lock = pool_lock[position]
        if not lock.acquire(False):
            continue
        print(f"Found dspool_item at index {position}")
        return {'lock': lock, 'ds': candidate}
    return None
|
rafaank/rkpylib | rkpylib/backups/rkhttp_copy.py | import errno
import socket
import time
from rkutils import RKDict, setInterval, trace_memory_leaks
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib import parse
from socketserver import ThreadingMixIn
from threading import Thread, Lock
from rklogger import RKLogger
import json
import traceback
import importlib
import importlib.util
import rkhttp_globals
import gc
import tracemalloc
# Pre-built raw HTTP status lines (not referenced in the visible code).
ok_response_text = 'HTTP/1.0 200 OK\n\n'
nok_response_text = 'HTTP/1.0 404 NotFound\n\n'
class RKHttpGlobals():
    """Thread-safe registry of variables shared across request handlers.

    Variables are registered once and then read/updated under a single
    Lock, so concurrent handler threads see consistent values.  All
    accessors return False/None instead of raising on lock timeout.
    """

    def __init__(self, debug_mode = True):
        self._nof_requests = 0
        self._variables = dict()
        self._debug_mode = debug_mode
        self._lock = Lock()
        # delegate initial population to the deployment-specific module
        rkhttp_globals.__init_globals__(self)
        # self._refresh_stop = self.__refresh_globals__()

    def __del__(self):
        # BUG FIX: the original __del__ contained a corrupted copy/paste of
        # update()'s body (an `elif` directly inside a `for` loop), which
        # was a syntax error.  On teardown we only need to drop the registry.
        if self._lock.acquire(True, 10):
            try:
                self._variables = None
            finally:
                self._lock.release()

    def register(self, var_name, var_value) : #, reload_interval = None, reload_func = None):
        """Register a new shared variable; returns True on success.

        Fails (returns False) when the name already exists, the value is
        None, or the lock cannot be acquired within one second.
        """
        RKLogger.debug(f'Register requested for {var_name}')
        if self._lock.acquire(True, 1):
            try:
                if var_name in self._variables:
                    # Variable already exists
                    return False
                elif var_value is None:
                    # Variable value cannot be None
                    return False
                else:
                    self._variables[var_name] = dict()
                    self._variables[var_name]['name'] = var_name
                    self._variables[var_name]['value'] = var_value
                    return True
            except:
                return False
            finally:
                self._lock.release()
        else:
            # failed to get lock
            return False

    def update(self, var_name, var_value):
        """Update an existing variable; passing None as the value deletes it."""
        RKLogger.debug(f'Update requested for {var_name}')
        if self._lock.acquire(True, 1):
            try:
                if not var_name in self._variables:
                    return False
                elif var_name in self._variables and var_value is None:
                    del self._variables[var_name]
                else:
                    self._variables[var_name]['value'] = var_value
                    self._variables[var_name]['last_reload'] = time.time()
                return True
            except:
                return False
            finally:
                self._lock.release()
        else:
            # failed to get lock
            return False

    def get(self, var_name):
        """Return the variable's current value, or None if absent or on lock timeout."""
        RKLogger.debug(f'Get requested for {var_name}')
        if self._lock.acquire(True, 1):
            try:
                if var_name in self._variables:
                    return self._variables[var_name]['value']
                else:
                    return None
            finally:
                self._lock.release()
        else:
            return None

    # The periodic-reload machinery (stop_reload, stop_all_reload,
    # __async__, __refresh_globals__) that previously sat here as a dead
    # triple-quoted string has been removed; restore it from version
    # control if the reload feature is ever resurrected.
def RKHandlerClassFactory(myargs):
    """Build a BaseHTTPRequestHandler subclass bound to shared globals.

    URL paths of the form /<module>/<function> are dispatched by
    importing <module> and calling
    <function>(globals, request, response); any other path falls back
    to handle_default.
    """
    class RKHTTPRequestHandler(BaseHTTPRequestHandler):
        def __init__(self, *args, **kwargs):
            self.globals = myargs
            # NOTE(review): unsynchronized increment — races under
            # ThreadingMixIn; confirm whether exact counts matter.
            self.globals._nof_requests += 1
            super(RKHTTPRequestHandler, self).__init__(*args, **kwargs)

        def __del__(self):
            # Drop references to help the GC break handler/socket cycles.
            # NOTE(review): assumes do_preprocess() ran; when it did not,
            # these attributes do not exist (errors in __del__ are swallowed
            # by the interpreter).
            self.request.path = None
            self.request.parsed_path = None
            self.request.url_params = None
            self.request.command = None
            self.request.headers = None
            self.request.rfile = None
            self.request = None
            self.response.wfile = None
            self.response.send_response = None
            self.response.send_header = None
            self.response.end_headers = None
            self.response = None
            self.globals = None
            print("Destroying RKRequestHandler")

        def do_preprocess(self):
            """Parse the URL and resolve the target module/function.

            Returns True when a handler function is ready to run; on
            failure an HTTP error has already been sent.
            """
            self.request = RKDict()
            self.request.path = self.path
            self.request.parsed_path = parse.urlparse(self.path)
            self.request.url_params = parse.parse_qsl(self.request.parsed_path.query)
            self.request.command = self.command
            self.request.headers = self.headers
            self.request.rfile = self.rfile
            self.response = RKDict()
            self.response.wfile = self.wfile
            self.response.send_response = self.send_response
            self.response.send_header = self.send_header
            self.response.end_headers = self.end_headers
            paths = self.request.parsed_path.path.split('/')
            if len(paths) >= 3:
                self.module_name = paths[1]
                self.function_name = paths[2]
                try:
                    module = __import__(self.module_name)
                    if self.globals._debug_mode:
                        # pick up handler code changes without a restart
                        importlib.reload(module)
                    self.log_message(f'Successfully Loaded module {self.module_name}')
                    self.function = getattr(module, self.function_name)
                    self.log_message(f'Successfully Loaded function {self.function_name}')
                except ModuleNotFoundError as mnfe:
                    self.send_error(400, str(mnfe))
                    return False
                except AttributeError as ae:
                    self.send_error(400, str(ae))
                    # BUG FIX: missing return — execution previously fell
                    # through and reported success with no handler set.
                    return False
                except Exception as e:
                    self.send_error(500, str(e), traceback.format_exc())
                    return False
            else:
                self.function = self.handle_default
                self.function_name = 'Default'
            return True

        def do_GET(self):
            if self.do_preprocess():
                try:
                    RKLogger.debug(f'Executing function {self.function_name}')
                    self.function(self.globals, self.request, self.response)
                    RKLogger.debug(f'Completed function {self.function_name}')
                except BrokenPipeError as bpe:
                    RKLogger.exception(str(bpe))
                except Exception as e:
                    try:
                        self.send_error(500, str(e), traceback.format_exc())
                    except:
                        pass

        def do_POST(self):
            if self.do_preprocess():
                try:
                    content_length = int(self.headers['Content-Length'])
                    self.request.post_data = self.rfile.read(content_length)
                except Exception as e:
                    self.send_error(500, str(e), traceback.format_exc())
                    return
                try:
                    RKLogger.debug(f'Executing function {self.function_name}')
                    # BUG FIX: POST previously omitted the globals argument
                    # that every handler (and do_GET) expects.
                    self.function(self.globals, self.request, self.response)
                    RKLogger.debug(f'Completed function {self.function_name}')
                except Exception as e:
                    self.send_error(500, str(e), traceback.format_exc())

        def log_message(self, format, *args):
            RKLogger.debug(format, *args)

        def log_error(self, format, *args):
            RKLogger.exception(format, *args)

        def log_response_text(self, format, *args):
            RKLogger.debug(format, *args)

        def handle_default(self, globals, request, response):
            # BUG FIX: the default handler is invoked with
            # (globals, request, response) like every routed handler, but
            # its signature previously lacked the globals parameter.
            self.send_response(200)
            self.send_header('Content-Type', 'text/plain; charset=utf-8')
            self.end_headers()
            self.wfile.write('RKHttp is active...'.encode('utf-8'))

    return RKHTTPRequestHandler
class RKHTTPServer(ThreadingMixIn, HTTPServer):
    # HTTP server that handles each request in its own thread.
    pass
if __name__ == '__main__':
    # Start allocation tracing first so leaks during setup are visible too.
    tracemalloc.start()
    gl = None
    server = None
    try:
        #gc.set_debug(gc.DEBUG_LEAK)
        ipaddr = socket.gethostname()
        port = 8282
        RKLogger.initialize('rkhttp', 'rkhttp.log', RKLogger.DEBUG)
        gl = RKHttpGlobals()
        server = RKHTTPServer((ipaddr, port), RKHandlerClassFactory(gl))
        print (f'listening on address {ipaddr} and port {port}')
        server.serve_forever()
    except Exception as e:
        print(e)
    finally:
        # BUG FIX: gl/server could be unbound here when setup failed,
        # turning the original `del(gl)` cleanup into a NameError.
        if server is not None:
            del server
        if gl is not None:
            del gl
        gc.collect()
        trace_memory_leaks()
'''
list = [1, 2, 3]
dictionary = {1: 'one', 2: 'two', 3: 'three'}
tuple = (1, 2, 3)
set = {1, 2, 3}
''' |
rafaank/rkpylib | rkpylib/rkthread.py | import threading
import queue
from enum import Enum
import time
class ThreadStatus(Enum):
    """Lifecycle states of an RKThread worker."""
    NOT_STARTED = 1
    IDLE = 2
    RUNNING = 3
    TERMINATED = 4
# A worker thread retires (and is replaced by the manager) after this many jobs.
MAX_JOBS_PER_THREAD = 10
class RKThread(threading.Thread):
    "Thread-Pool Runner thread"

    def __init__(self, manager, thread_id, queue, queue_lock, on_run, on_complete, on_error):
        """Create a daemon worker bound to its manager, job queue and callbacks."""
        threading.Thread.__init__(self)
        self.manager = manager
        self.thread_id = thread_id
        self.status = ThreadStatus.NOT_STARTED
        self.queue = queue
        self.queue_lock = queue_lock
        self.jobs_done = 0
        self.daemon = True
        self.do_terminate = False
        self._on_run = on_run
        self._on_complete = on_complete
        self._on_error = on_error
        print(f'Thread Id: {self.thread_id} - Created')

    def run(self):
        """Consume jobs until the per-thread job limit or the termination flag."""
        print(f'Thread Id: {self.thread_id} - Running')
        while self.jobs_done < MAX_JOBS_PER_THREAD and not self.do_terminate:
            try:
                self.status = ThreadStatus.IDLE
                print(f'Thread Id: {self.thread_id} - Idle')
                try:
                    # wait up to 5s so the termination flag is re-checked regularly
                    job = self.queue.get(True, 5)
                except:
                    print(f'Thread Id: {self.thread_id} - Job Queue Empty')
                    continue
                if job is None:
                    print(f'Thread Id: {self.thread_id} - Job cannot be of type None')
                    continue
                print(f'Thread Id: {self.thread_id} - Job Details = {job}')
                try:
                    self.status = ThreadStatus.RUNNING
                    self.jobs_done += 1
                    self._on_run(self.thread_id, self.jobs_done, job)
                except Exception as e:
                    self._on_error(self.thread_id, e, job)
                finally:
                    self._on_complete(self.thread_id, self.jobs_done, job)
                    self.status = ThreadStatus.IDLE
            except queue.Empty as emp:
                pass
        print(f'Thread Id: {self.thread_id} - Terminating')
        self.status = ThreadStatus.TERMINATED
        # BUG FIX: deregister via our own manager; the original referenced the
        # global `mgr`, which exists only when this module's demo __main__
        # block has run, so pool threads crashed with NameError on exit.
        self.manager.unregister_thread(self.thread_id)
class RKThreadManager:
    "Thread-Pool Manager"

    def __init__(self, max_threads, on_run, on_complete, on_error):
        """Create a manager with a bounded job queue (100) and no threads yet."""
        self._max_threads = max_threads
        self._on_run = on_run
        self._on_complete = on_complete
        self._on_error = on_error
        self._active_threads = 0
        self._counter = 0          # monotonically increasing thread id
        self.terminating = False
        self._thread_lock = threading.Lock()
        self.queue_lock = threading.Lock()
        self.threads = dict()
        self.queue = queue.Queue(100)
        print(f'ThreadManager - Initialized')

    def start(self, thread_count = 0):
        """Spawn `thread_count` workers (defaults to the pool maximum)."""
        print(f'ThreadManager - Starting {thread_count} worker threads')
        if not self.terminating:
            if thread_count == 0:
                thread_count = self._max_threads
            elif thread_count > self._max_threads:
                return False
            i = 0
            while i < thread_count:
                self.new_thread()
                i += 1
            return True
        else:
            return False

    def new_thread(self):
        """Register and start one worker; returns True on success."""
        if not self.terminating:
            if self._active_threads < self._max_threads:
                if self._thread_lock.acquire(True, 30):
                    try:
                        thread = RKThread(self, self._counter, self.queue, self.queue_lock, self._on_run, self._on_complete, self._on_error)
                        self.threads[self._counter] = thread
                        self._counter += 1
                        self._active_threads += 1
                    finally:
                        self._thread_lock.release()
                    thread.start()
                    # BUG FIX: the original returned False here even though
                    # the worker thread was created and started successfully.
                    return True
                else:
                    # could not acquire thread lock
                    return False
            else:
                # reached maxed thread
                return False
        else:
            return False

    def unregister_thread(self, threadid):
        """Remove a retired worker; spawn a replacement unless terminating."""
        if self._thread_lock.acquire(True, 30):
            try:
                del self.threads[threadid]
                self._active_threads -= 1
            finally:
                self._thread_lock.release()
        if not self.terminating:
            self.new_thread()

    def add_job(self, job_data_object):
        """Enqueue one job; returns False when full, locked out or terminating."""
        if not self.terminating:
            if self.queue_lock.acquire(True, 30) :
                try:
                    self.queue.put_nowait(job_data_object)
                    return True
                except queue.Full as full:
                    return False
                except:
                    return False
                finally:
                    self.queue_lock.release()
            else:
                # Failed to acquire Queue Lock
                return False
        else:
            return False

    def wait_finish(self):
        """Block until every worker thread has terminated."""
        def wait_for_thread_terminate():
            while self._active_threads != 0:
                time.sleep(1)
        print(f'ThreadManager - Launching WAIT thread')
        wait_thread = threading.Thread(target = wait_for_thread_terminate, args=(), kwargs={})
        wait_thread.daemon = True
        wait_thread.start()
        wait_thread.join()

    def terminate(self):
        """Flag every worker to stop after its current job; no new threads spawn."""
        print(f'ThreadManager - Terminating all ({self._active_threads}) threads')
        self.terminating = True
        self._thread_lock.acquire(True, 30)
        try:
            for index,thread in self.threads.items():
                thread.do_terminate = True
        finally:
            self._thread_lock.release()
if __name__ == '__main__':
    # Demo harness: run one worker through 11 jobs and wait for completion.
    print('Testing Message %s','By RK')
    def dorun(thread_id, jobs_done, job_data):
        print(f'Running on ThreadId {thread_id}, JobsDone {jobs_done}, data {job_data}')
    def docomplete(thread_id, jobs_done, job_data):
        print(f'Completing on ThreadId {thread_id}, JobsDone {jobs_done}, data {job_data}')
    def doerror(thread_id, error, job_data):
        print(f'Error on ThreadId {thread_id} with data {job_data}')
    mgr = RKThreadManager(11, dorun, docomplete, doerror)
    mgr.start(1)
    i = 0
    while i<11:
        i +=1
        print(f'Adding Job {i}')
        mgr.add_job(f'Job {i}')
        time.sleep(1)
    # mgr.terminate()
    # or
    mgr.wait_finish()
    print ('Process Completed')
|
rafaank/rkpylib | rkpylib/rkclusterlockserver.py | import ssl
from socketserver import ThreadingMixIn, TCPServer, BaseRequestHandler
from threading import Lock
import socket
import platform, os, sys, getopt
from rkclusterlock import RKClusterLock
import rkutils
import logging
help_str = '''
helo :
Welcome message from server
reg <app_name> :
Register a new APP:
acq <app_name> <acquire_wait_time> <max_release_time> :
Client request to acquire lock for APP <app_name>.
Server will wait for <acquire_wait_time> to acquire the lock
and wait for <max_release_time> for the lock to be release
otherwise releases the lock forcefully.
lck <data> :
Servers response to acq, mentioning the lock is acquired
followed by the current data stored for the APP
rel <data> :
Release lock for the acquired APP and set the new data.
fail :
Servers response to acq, mentioning the server failed to
acquire lock for this client during the <acquire_wait_time>
help :
Help using RKClusterLock
err :
Error occured at server side, which could be due invalid
commands or wrong sequence
quit :
End connection with the server\n
'''
class CLConstants() :
    """Wire-protocol keywords shared by server and clients (see help_str)."""
    WELCOME = 'helo'
    REGISTER = 'reg'
    ACQUIRE = 'acq'
    LOCKED = 'lck'
    RELEASE = 'rel'
    FAILED = 'fail'
    HELP = 'help'
    ERROR = 'err'
    QUIT = 'quit'
    SEPARATOR = ' '
    # maximum bytes read per recv() call
    BUF_SIZE = 128
class RKClusterNode():
    """Per-application lock plus the data string it protects."""
    def __init__(self):
        self.lock = Lock()
        self.data = ""
class RKClusterLockServer():
    """Threaded TCP lock server coordinating cluster-wide app locks.

    Keeps one RKClusterNode (lock + data string) per registered app,
    loads/saves the data strings under `data_path`, and logs to
    /var/log/rkclusterlock.log.
    """
    def __init__(self):
        # The format string requires client ip/port via the `extra` dict on
        # every logging call.
        self.logger = logging.getLogger('rkclusterlock')
        self.logger.setLevel(logging.DEBUG) # logging.ERROR
        log_file = '/var/log/rkclusterlock.log'
        fh = logging.FileHandler(log_file)
        fh.setLevel(logging.DEBUG)
        log_format = '%(asctime)s - %(name)s - %(ip)s:%(port)d - %(levelname)s - %(message)s'
        formatter = logging.Formatter(log_format)
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)
    def start(self, host, port, data_path):
        """Load persisted app data, start periodic saves and serve forever."""
        def load_app_data(nodes):
            # one file per app under data_path; file content is the app data
            for filename in os.listdir(data_path):
                if os.path.isfile(data_path + "/" + filename) and filename[0:1] != ".":
                    f=open(data_path + "/" + filename, 'r')
                    data = ""
                    for line in f:
                        data = data + line
                    node = RKClusterNode()
                    node.lock = Lock()
                    node.data = data
                    nodes[filename] = node
                    f.close()
        @rkutils.setInterval(60)
        def save_app_data(nodes):
            # runs every 60s: snapshot each app's data under its lock,
            # then persist it to disk
            for app_name,node in nodes.items():
                if node.lock.acquire(True, 1):
                    try:
                        data = node.data
                    finally:
                        node.lock.release()
                    with open(data_path + "/" + app_name, 'w+') as f:
                        f.write(data)
        nodes = dict()
        load_app_data(nodes)
        save_app_data(nodes)
        self.server = RKTCPServer((host, port), RKTCPHandlerClassFactory(nodes, self.logger))
        ip, port = self.server.server_address
        extra = {'ip':ip, 'port':port}
        self.logger.info('RKClusterLock server started...', extra=extra)
        self.server.serve_forever()
    def stop(self):
        """Shut down the serve_forever loop and close the listening socket."""
        self.server.shutdown()
        self.server.server_close()
class RKTCPServer(ThreadingMixIn, TCPServer): # Can be derived from RKSSLTCPServer instead of TCPServer for SSL Support
    # Threaded TCP server: one handler thread per client connection.
    pass
class RKSSLTCPServer(TCPServer):
    """TCPServer variant that wraps every accepted connection in TLS.

    `certfile`/`keyfile` point to the server certificate and key;
    `ssl_version` keeps the original default for interface compatibility
    (NOTE(review): PROTOCOL_TLSv1 is obsolete — consider
    PROTOCOL_TLS_SERVER at the call sites).
    """
    def __init__(self, server_address, RequestHandlerClass, certfile, keyfile, ssl_version=ssl.PROTOCOL_TLSv1, bind_and_activate=True ):
        TCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
        self.certfile = certfile
        self.keyfile = keyfile
        self.ssl_version = ssl_version

    def get_request(self):
        """Accept a connection and return the TLS-wrapped socket."""
        newsocket, fromaddr = self.socket.accept()
        # BUG FIX: ssl.wrap_socket() was deprecated in 3.7 and removed in
        # Python 3.12; build an SSLContext and wrap through it instead.
        context = ssl.SSLContext(self.ssl_version)
        context.load_cert_chain(certfile=self.certfile, keyfile=self.keyfile)
        connstream = context.wrap_socket(newsocket, server_side=True)
        return connstream, fromaddr
def RKTCPHandlerClassFactory(nodes, logger):
    """Build a request-handler class closed over the shared `nodes` dict and logger.

    Wire protocol (see help_str): one command per line — reg/acq/rel/
    help/quit — with fields separated by CLConstants.SEPARATOR.
    """
    class RKTCPRequestHandler(BaseRequestHandler):
        def __init__(self, *args, **kwargs):
            # bind shared state before BaseRequestHandler invokes handle()
            self.nodes = nodes
            self.logger = logger
            super(RKTCPRequestHandler, self).__init__(*args, **kwargs)
        def handle(self):
            """Serve one client: greet, then process commands until quit/disconnect."""
            clientip, port = self.request.getpeername()
            self.logger_extra = {'ip':clientip, 'port':port}
            self.logger.info("New client connection",extra=self.logger_extra)
            response = CLConstants.WELCOME + f": Lock Server waiting for request \n"
            self.request.sendall(bytes(response, 'ascii'))
            while 1:
                data = ""
                try:
                    #self.request.setdefaulttimeout(5.0)
                    # block indefinitely while waiting for the next command
                    self.request.settimeout(None)
                    try:
                        data = str(self.request.recv(CLConstants.BUF_SIZE), 'ascii').strip()
                    except socket.timeout as to:
                        self.logger.error("Timeout reading from client", extra=self.logger_extra)
                        continue
                    self.logger.debug(f"Got data packet: {data}", extra=self.logger_extra)
                    if not data:
                        # empty read == peer closed the connection
                        self.logger.debug("Client disconnected", extra=self.logger_extra)
                        break
                    data_arr = data.split(CLConstants.SEPARATOR)
                    if data_arr[0] == CLConstants.ACQUIRE:
                        # acq <app_name> [acquire_wait_time] [max_release_time]
                        try:
                            app_name = data_arr[1].strip()
                        except IndexError as ie:
                            self.logger.error("Missing app_name",extra=self.logger_extra)
                            response = CLConstants.FAILED + "\n"
                            self.request.sendall(bytes(response, 'ascii'))
                            continue
                        try:
                            acquire_wait_time = float(data_arr[2])
                        except (IndexError, ValueError) as err:
                            acquire_wait_time = 5
                        try:
                            max_release_time = float(data_arr[3])
                        except (IndexError, ValueError) as err:
                            max_release_time = 5.0
                        try:
                            node = self.nodes[app_name]
                        except KeyError as ke:
                            # NOTE(review): missing f-prefix — this logs the
                            # literal "{app_name}" placeholder, not the name.
                            self.logger.error("APP {app_name} not registered",extra=self.logger_extra)
                            response = CLConstants.ERROR + f": APP {app_name} not registered\n"
                            self.request.sendall(bytes(response, 'ascii'))
                            continue
                        if node.lock.acquire(True, acquire_wait_time):
                            try:
                                self.logger.info("Lock Acquired",extra=self.logger_extra)
                                # the client must release within max_release_time
                                self.request.settimeout(max_release_time)
                                response = CLConstants.LOCKED + CLConstants.SEPARATOR + node.data + "\n"
                                self.request.sendall(bytes(response, 'ascii'))
                                data = str(self.request.recv(CLConstants.BUF_SIZE), 'ascii').strip()
                                data_arr = data.split(CLConstants.SEPARATOR, 1)
                                if data_arr[0] == CLConstants.RELEASE:
                                    try:
                                        node.data = data_arr[1].strip()
                                    except:
                                        # rel without payload keeps the old data
                                        pass
                                    self.logger.info(f"RELEASE request received, setting new data = {node.data}", extra=self.logger_extra)
                                    response = CLConstants.RELEASE + CLConstants.SEPARATOR + ": success\n"
                                    self.logger.info(f"Sending response {response} to client",extra=self.logger_extra)
                                    self.request.sendall(bytes(response, 'ascii'))
                                    self.logger.info(f"Response sent to client", extra=self.logger_extra)
                                else:
                                    self.logger.error(f"Expected {CLConstants.RELEASE} got {data_arr[0]}, lock released forcefully",extra=self.logger_extra)
                                    response = CLConstants.ERROR + CLConstants.SEPARATOR + f": Expected {CLConstants.RELEASE}, lock released forcefully\n"
                                    self.request.sendall(bytes(response, 'ascii'))
                            except socket.timeout as to:
                                self.logger.error("<max_release_time> timeout, lock released forcefully",extra=self.logger_extra)
                                response = CLConstants.ERROR + CLConstants.SEPARATOR + ":<max_release_time> timeout, lock released forcefully\n"
                                self.request.sendall(bytes(response, 'ascii'))
                                continue
                            except Exception as e:
                                self.logger.error("Some Error" + str(e), extra=self.logger_extra)
                            finally:
                                # the lock is always released, even on timeout/error
                                node.lock.release()
                                self.logger.info("Lock Released",extra=self.logger_extra)
                        else:
                            # Failed to acquire lock
                            response = CLConstants.FAILED + "\n"
                            self.request.sendall(bytes(response, 'ascii'))
                    elif data_arr[0] == CLConstants.REGISTER:
                        # reg <app_name>: create a fresh node for the app
                        try:
                            app_name = data_arr[1].strip()
                            node = RKClusterNode()
                            node.lock = Lock()
                            self.nodes[app_name] = node
                            response = CLConstants.REGISTER + CLConstants.SEPARATOR + ": success\n"
                            self.request.sendall(bytes(response, 'ascii'))
                        except:
                            self.logger.error("Missing app_name",extra=self.logger_extra)
                            response = CLConstants.ERROR + CLConstants.SEPARATOR + "app_name invalid or missing: \n"
                            self.request.sendall(bytes(response, 'ascii'))
                    elif data_arr[0] == CLConstants.HELP:
                        response = CLConstants.HELP + CLConstants.SEPARATOR + help_str
                        self.request.sendall(bytes(response, 'ascii'))
                    elif data_arr[0] == CLConstants.RELEASE:
                        # rel outside an acq/lck exchange is a protocol error
                        self.logger.error("No active lock to be released",extra=self.logger_extra)
                        response = CLConstants.ERROR + ": No active lock to be released\n"
                        self.request.sendall(bytes(response, 'ascii'))
                    elif data_arr[0] == CLConstants.QUIT:
                        self.request.close()
                        break
                    else:
                        self.logger.error("Unexpected message",extra=self.logger_extra)
                        response = CLConstants.ERROR + ": Unexpected message\n"
                        self.request.sendall(bytes(response, 'ascii'))
                except ConnectionError as ce:
                    self.logger.error("Connection error", extra=self.logger_extra)
                    break
                except Exception as e:
                    #An Unknown exception has occured, lets not do anything just log the error and close the socket
                    self.logger.exception(str(e), extra=self.logger_extra)
                    try:
                        self.request.close()
                    except:
                        pass
                    break
    return RKTCPRequestHandler
if __name__ == "__main__":
    # Command-line entry point for the RKClusterLockServer.
    ahost = "0.0.0.0"   # default bind address
    aport = "0"         # sentinel; an out-of-range port falls back to 9191 below
    adata_path = ""
    argv = sys.argv[1:]
    try:
        # Fixed: "--help" was matched in the loop below but never declared as a
        # long option, so it always raised GetoptError (dead branch).  Note that
        # short "-h" takes the host argument in this CLI, not help.
        opts, args = getopt.getopt(argv, "h:p:d:", ["help", "host=", "port=", "data-path="])
    except getopt.GetoptError:
        print('Invalid parameters, use the below syntax')
        print('glhttp.py -h <host> -p <port> -d <data-path>')
        print()
        print('\t-h <host>\t IP address on which to start the RKCLusterLockServer, defaults to 0.0.0.0')
        print('\t-p <port>\t Port on which RKClusterLockServer will listen, defaults to 9191')
        print('\t-d <data-path>\t Path where the application data will be stored')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '--help':
            print('glhttp.py -h <host> -p <port> -d <data-path>')
            sys.exit()
        elif opt in ("-h", "--host"):
            ahost = arg
        elif opt in ("-p", "--port"):
            aport = arg
        elif opt in ("-d", "--data-path"):
            adata_path = arg
    port = int(aport)
    host = ahost
    data_path = adata_path
    if not os.path.isdir(data_path):
        print("error: data-path does not exist")
        sys.exit(2)
    # Fixed: valid TCP ports run 1-65535; the original range(1, 65525) wrongly
    # rejected ports 65525-65535.  Anything out of range falls back to 9191.
    if port not in range(1, 65536):
        port = 9191
    lock_server = RKClusterLockServer()
    lock_server.start(host, port, data_path)
|
rafaank/rkpylib | tests/rktest.py | <gh_stars>0
import sys
import json
import time
import datetime
import random
from bson import json_util
from rkpylib.rkdatasource import RKDataSource
from rkpylib.rkhttp import RKHTTP
import pymongo
@RKHTTP.route('/pool')
def pool_example(globals, request, response):
    """Handler demonstrating reuse of pooled RKDataSource connections.

    Picks a free datasource from the globally registered pool; when none is
    available it falls back to a throw-away connection, and always replies
    with a JSON document describing the query result.
    """
    resp_json = dict()
    response.send_response(200)
    response.send_header('Content-Type', 'application/json')
    response.end_headers()
    # Bump the shared request counter kept in the thread-safe globals store.
    total_requests = globals.get('total_requests') + 1
    globals.set('total_requests', total_requests)
    resp_json['total_requests'] = total_requests
    resp_json['new'] = "new"
    dspool = globals.get("dspool")
    dspool_func = globals.get("dspool_func")
    # Fixed: the original condition was ``if dspool or dspool_func()`` which
    # called the picker with NO argument (TypeError) whenever the pool was
    # falsy.  Only call the picker when both pool and picker are registered.
    ds = dspool_func(dspool) if (dspool and dspool_func) else None
    if not ds:
        print("All DataSource inuse - Creating new DataSource")
        ds = RKDataSource(server='127.0.0.1', port=27017, database='test')
        data = ds.db['restaurants'].find({'cuisine': 'American'}).skip(1).limit(5)
        # NOTE(review): the cursor is consumed below, after close(); pymongo
        # may refuse to iterate a cursor on a closed client -- confirm.
        ds.client.close()
    else:
        print("Data Source Found")
        data = ds.db.restaurants.find_one({'cuisine': 'American'})
        ds.lock.release()
    if data:
        resp_json['code'] = 'success'
        if isinstance(data, pymongo.cursor.Cursor):
            resp_json['data'] = [row for row in data]
        else:
            resp_json['data'] = data
    else:
        resp_json['code'] = 'not_found'
        resp_json['data'] = "No records found"
    response_text = json.dumps(resp_json, default=json_util.default)
    response.wfile.write(response_text.encode("utf-8"))
@RKHTTP.route('/table')
def table_example(globals, request, response):
    """Render a per-user activity grid (users x dates) as an HTML table.

    Generates random sample data for user ids 1-99 over a fixed date range,
    then emits one table row per user with a ``1`` cell for each date present
    in that user's sample and ``0`` cells everywhere else.
    """
    def date_range(start, end):
        # Yields every date from start to end, inclusive.
        for n in range(int((end-start).days)+1):
            yield start + datetime.timedelta(n)
    def fill_up_rest(start, end):
        # Emits 0-cells for every remaining date of the current row.
        print(f"fill_up_rest {start}, {end}")
        html = ""
        for dt in date_range(start, end):
            html += '<td style="width:100px;border-right:1px solid black">0</td>'
        return html
    def process_row(start, end):
        # Emits 0-cells from ``start`` up to ``end`` and a single 1-cell on
        # ``end`` itself (the date that appears in the user's sample).
        print(f"process_row {start}, {end}")
        html = ""
        for dt in date_range(start, end):
            if dt == end:
                html += '<td style="width:100px;border-right:1px solid black">1</td>'
            else:
                html += '<td style="width:100px;border-right:1px solid black">0</td>'
        return html
    start_date = datetime.date(2018, 4, 15)
    end_date = datetime.date(2018, 5, 1)
    # Build the sample data: 10 random (sorted) dates per user id.
    arr = list()
    for i in range(1,100):
        rand = random.sample(list(date_range(start_date, end_date)), 10)
        rand.sort()
        for dt in rand:
            obj = dict()
            obj["usrid"] = i
            obj["date"] = dt.strftime('%Y-%m-%d')
            arr.append(obj)
    # Header row: UserID column followed by one column per date in the range.
    html = '<table><thead><td style="width:100px;border-right:1px solid black">UserID</td>'
    for dt in date_range(start_date, end_date):
        html += f'<td style="width:100px;border-right:1px solid black">{dt.strftime("%Y-%m-%d")}</td>'
    # Walk the (user-ordered) records; ``last_date`` tracks where the current
    # row's cells have been emitted up to.  When a new user id starts, the
    # previous user's row is padded with trailing 0-cells and closed.
    act_usrid = 0
    last_date = start_date
    for r in arr:
        if act_usrid == r["usrid"]:
            dt = datetime.datetime.strptime(r["date"], "%Y-%m-%d").date()
            html += process_row(last_date, dt)
            last_date = dt + datetime.timedelta(days = 1)
        else:
            if act_usrid > 0:
                # fill up all rest
                html += fill_up_rest(last_date, end_date)
                html += '</tr>'
                last_date = start_date
            act_usrid = r["usrid"]
            dt = datetime.datetime.strptime(r["date"], "%Y-%m-%d").date()
            html += f'<tr><td style="width:100px;border-right:1px solid black">{act_usrid}</td>'
            html += process_row(last_date, dt)
            last_date = dt + datetime.timedelta(days = 1)
    # Pad and close the final user's row.
    if len(arr) > 0:
        html += fill_up_rest(last_date, end_date)
        html += '</tr>'
    html += '</thead></table>'
    response.send_response(200)
    response.send_header('Content-Type', 'text/html')
    response.end_headers()
    response_text = html
    response.wfile.write(response_text.encode("utf-8"))
|
rafaank/rkpylib | setup.py | from setuptools import setup, find_packages
# Read the long description and license text straight from the repo files.
with open('README.md') as f:
    readme = f.read()
with open('LICENSE') as f:
    license = f.read()
# Standard setuptools metadata for the RKPyLib distribution.
setup(
    name='RKPyLib',
    version='0.0.1',
    description='Efficient customised python wrapper libraries ',
    long_description=readme,
    author='rafaank',
    author_email='<EMAIL>',
    url='https://github.com/rafaank/rkpylib',
    license=license,
    packages=find_packages()
)
rafaank/rkpylib | tests/test_cgi.py | from http.server import HTTPServer, BaseHTTPRequestHandler
import cgi
class WebServerHandler(BaseHTTPRequestHandler):
    """Demo HTTP handler: serves a greeting form and echoes POSTed messages."""

    # Shared HTML snippet; the form POSTs multipart/form-data back to /hello.
    form_html = \
        '''
        <form method='POST' enctype='multipart/form-data' action='/hello'>
            <h2>What would you like me to say?</h2>
            <input name="message" type="text"><input type="submit" value="Submit" >
        </form>
        '''

    def do_GET(self):
        """Serve the /hello and /hola pages, each embedding the message form."""
        try:
            if self.path.endswith("/hello"):
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                output = ""
                output += "<html>" \
                          "  <body>" \
                          "    Hello!<br>" + self.form_html + \
                          "  </body>" \
                          "</html>"
                self.wfile.write(output.encode())
                print(output)
            if self.path.endswith("/hola"):
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                output = "<html>" \
                         "  <body>" \
                         "    ¡Hola! <br>" + self.form_html + \
                         "    <a href='/hello'>Back Home</a>" \
                         "  </body>" \
                         "</html>"
                self.wfile.write(output.encode())
                print(output)
        except IOError:
            self.send_error(404, "File Not Found {}".format(self.path))

    def do_POST(self):
        """Echo back the 'message' field of a multipart form POST."""
        try:
            # Fixed: the original sent 301 (Moved Permanently) with no Location
            # header while rendering a body; a plain 200 is the correct status.
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            # HEADERS are now in dict/json style container
            print(self.headers)
            ctype = self.headers['content-type']
            print(f"ctype = {ctype}")
            if ctype.startswith('multipart/form-data'):
                ctype, pdict = cgi.parse_header(self.headers['content-type'])
                print(f"ctype1 = {ctype}")
                print(pdict)
                # boundary data needs to be encoded in a binary format
                pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
            if ctype == 'multipart/form-data':
                fields = cgi.parse_multipart(self.rfile, pdict)
                messagecontent = fields.get('message')
                output = ""
                output += "<html><body>"
                output += " <h2> Okay, how about this: </h2>"
                # decode it back into a string rather than byte string(b'stuff')
                output += "<h1> {} </h1>".format(messagecontent[0].decode())
                output += self.form_html
                output += "</body></html>"
                self.wfile.write(output.encode())
                print(output)
        except:
            # Deliberately re-raise so the server logs the failure.
            raise
def main():
    """Start the demo web server on port 8282 until interrupted."""
    server = None  # fixed: ensure the name exists in ``finally`` even if HTTPServer() raises
    try:
        port = 8282
        server = HTTPServer(('', port), WebServerHandler)
        print("Web server is running on port {}".format(port))
        server.serve_forever()
    except KeyboardInterrupt:
        print("^C entered, stopping web server...")
    finally:
        # Close the listening socket on every exit path.
        if server:
            server.socket.close()


if __name__ == '__main__':
    main()
|
rafaank/rkpylib | tests/rkmain.py | <gh_stars>0
from rkpylib.rkhttp import RKHTTPGlobals, RKHTTP
from rkpylib.rkdatasource import RKDataSource
from rkpylib.rkutils import trace_memory_leaks
from rktest import *
from test import *
import gc
import tracemalloc
import pprint
import socket
if __name__ == '__main__':

    def dspool_func(pool):
        """Return the first datasource in *pool* whose lock could be acquired
        without blocking, or None when every connection is busy."""
        for idx, ds in enumerate(pool):
            if ds.lock.acquire(False):
                return ds
            else:
                continue
        return None

    tracemalloc.start()
    try:
        #gc.set_debug(gc.DEBUG_LEAK)
        ipaddr = socket.gethostname()
        port = 8282
        # Create a pool of datasources, each carrying its own lock, to enable
        # thread-safe reuse across request handlers.
        dspool = list()
        for i in range(5):
            ds = RKDataSource(server='127.0.0.1', port=27017, database='test')
            dspool.append(ds)
        server = RKHTTP.server((ipaddr, port), "rkmain_testapp", "/var/log/rkhttp.log")
        print (f'listening on address {ipaddr} and port {port}')
        # Register the pool and shared counters so every handler can reach them.
        server.globals.register('dspool', dspool)
        server.globals.register('dspool_func', dspool_func)
        server.globals.register('total_requests', 0)
        server.globals.register('counter', 0)
        server.serve_forever()
    finally:
        print ("Closing Down")
        for i in range(2):
            print('Collecting %d ...' % i)
            n = gc.collect()
            print('Unreachable objects:', n)
        # Fixed: pprint.pprint() prints and returns None, so nesting it inside
        # print() emitted a spurious "None"; call it on its own line instead.
        print('Remaining Garbage:')
        pprint.pprint(gc.garbage)
        # Fixed: a bare ``print`` expression (Python 2 leftover) did nothing;
        # emit the intended blank line.
        print()
        trace_memory_leaks()
        #traceback_memory_leaks()
'''
list = [1, 2, 3]
dictionary = {1: 'one', 2: 'two', 3: 'three'}
tuple = (1, 2, 3)
set = {1, 2, 3}
'''
|
rafaank/rkpylib | rkpylib/rkclusterlock.py | <reponame>rafaank/rkpylib
# import ssl
import socket
class RKClusterLock():
    """Client for a networked (cluster-wide) lock served by RKClusterLockServer.

    Speaks a small line-oriented ASCII protocol over TCP; each lock is scoped
    to an application name registered on the server, and a string payload can
    be stored on release and retrieved on the next acquire.
    """

    # Protocol verbs exchanged with the server.
    WELCOME = 'helo'
    REGISTER = 'reg'
    ACQUIRE = 'acq'
    LOCKED = 'lck'
    RELEASE = 'rel'
    FAILED = 'fail'
    HELP = 'help'
    ERROR = 'err'
    QUIT = 'quit'
    SEPARATOR = ' '
    BUF_SIZE = 128

    def __init__(self, ip, port, app_name):
        """Connect to the lock server at ip:port for application *app_name*."""
        self.ip = ip
        self.port = port
        self.app_name = app_name
        self.connect()

    def __del__(self):
        # Best-effort close; the socket may already be gone.
        try:
            self.sock.close()
        except:
            pass

    def connect(self):
        """(Re)open the TCP connection and consume the server's welcome line."""
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(5)
        #self.sock.setdefaulttimeout(5)
        # For ssl support we can use self.ssl_sock instead of self.sock
        # self.ssl_sock = ssl.wrap_socket(s, ca_certs="cert.pem", cert_reqs=ssl.CERT_REQUIRED, ssl_version=ssl.PROTOCOL_TLSv1)
        self.sock.connect((self.ip, self.port))
        response = str(self.sock.recv(RKClusterLock.BUF_SIZE), 'ascii')

    def acquire(self, wait, acquire_wait_time = 5, max_release_time = 5):
        """Try to acquire the cluster lock.

        Returns (True, payload) on success and (False, None) on failure or
        timeout; raises Exception with the server's message on a protocol
        error.  When *wait* is falsy the server is asked not to block.
        """
        data = None
        try:
            if not wait:
                acquire_wait_time = 0
            message = f'{RKClusterLock.ACQUIRE}{RKClusterLock.SEPARATOR}{self.app_name}{RKClusterLock.SEPARATOR}{acquire_wait_time}{RKClusterLock.SEPARATOR}{max_release_time}'
            try:
                self.sock.sendall(bytes(message, 'ascii'))
            except ConnectionError as ce:
                # One reconnect-and-retry on a dropped connection.
                self.connect()
                self.sock.sendall(bytes(message, 'ascii'))
            response = str(self.sock.recv(RKClusterLock.BUF_SIZE), 'ascii')
            response = response.strip()
            response_arr = response.split(RKClusterLock.SEPARATOR,1)
            if response_arr[0] == RKClusterLock.LOCKED:
                # Lock granted; an optional payload travels with the reply.
                try:
                    data = response_arr[1]
                except IndexError as ie:
                    pass
                return True, data
            elif response_arr[0] == RKClusterLock.FAILED:
                return False, None
            else:
                # Unexpected verb: surface the server's message to the caller.
                # (Fixed: removed the original unreachable ``return`` after this raise.)
                data = response_arr[1]
                raise Exception(data)
        except socket.timeout as to:
            return False, None
        except ConnectionError as ce:
            self.connect()
            return False, None

    def release(self, data = ""):
        """Release the lock, optionally storing string *data* on the server."""
        if not isinstance(data, str):
            # (Fixed: removed the original unreachable ``return False`` after this raise.)
            raise ValueError("parameter data must be of string type")
        message = RKClusterLock.RELEASE + RKClusterLock.SEPARATOR + data
        self.sock.sendall(bytes(message, 'ascii'))
        # we take the response and ignore it
        response = str(self.sock.recv(RKClusterLock.BUF_SIZE), 'ascii')
        return True
|
rafaank/rkpylib | rkpylib/backups/rkhttp_backup.py | import time
import os
import sys
import errno
from multiprocessing import Process
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib import parse
from socketserver import ForkingMixIn, ThreadingMixIn
ok_message = 'HTTP/1.0 200 OK\n\n'
nok_message = 'HTTP/1.0 404 NotFound\n\n'
class MyClass():
    """Simple mutable holder: a name plus a request counter starting at zero."""

    def __init__(self, args):
        self.name = args
        self.ctr = 0
def RKHandlerClassFactory(myargs):
    """Build a BaseHTTPRequestHandler subclass bound to the shared *myargs* object."""

    class RKHTTPRequestHandler(BaseHTTPRequestHandler):

        def __init__(self, *args, **kwargs):
            # Capture the shared object and count one more handled connection.
            self.variable = myargs
            self.variable.ctr += 1
            super().__init__(*args, **kwargs)

        def do_GET(self):
            self.send_response(200)
            self.send_header('Content-Type', 'text/plain; charset=utf-8')
            self.end_headers()
            body = f'process_id={os.getpid()} and Variable = {self.variable.name} and Counter = {self.variable.ctr}\r\n'
            self.wfile.write(body.encode('utf-8'))

    return RKHTTPRequestHandler
class RKHTTPRequestHandler1(BaseHTTPRequestHandler):
    """Diagnostic handler that echoes request and server details as plain text."""

    def do_GET(self):
        """Reply 200 with a dump of the request line, path parts and server info."""
        parsed_path = parse.urlparse(self.path)
        request = [
            f'client_address={self.client_address} ({self.address_string()})',
            f'command={self.command}',
            f'path={self.path}',
            f'real_path={parsed_path.path}',
            f'query_params={parsed_path.query}',
            f'request_version={self.request_version}'
        ]
        server = [
            f'server_version={self.server_version}',
            f'sys_version={self.sys_version}',
            f'protocol_version={self.protocol_version}',
            f'process_id={os.getpid()}'
        ]
        headers = []
        for name, value in sorted(self.headers.items()):
            # Fixed: the original formatted the bound method (``value.rstrip``)
            # instead of calling it, yielding "<built-in method ...>" text.
            headers.append(f'{name}={value.rstrip()}')
        # NOTE(review): ``headers`` is collected but never included in the
        # response body below -- confirm whether that is intentional.
        self.send_response(200)
        self.send_header('Content-Type', 'text/plain; charset=utf-8')
        self.end_headers()
        message = '\r\n'.join(request) + '\r\n' + '\r\n'.join(server)
        self.wfile.write(message.encode('utf-8'))
# HTTPServer that handles each request in a forked child process
# (ForkingMixIn), so handlers do not share in-process state.
class RKHTTPServer(ForkingMixIn, HTTPServer):
    pass
if __name__ == '__main__':
    # Fixed: ``socket`` was never imported in this module, so gethostname()
    # raised NameError at startup; import it locally here.
    import socket
    ipaddr = socket.gethostname()
    port = 8282
    cls = MyClass("Fazal Khan")
    server = RKHTTPServer((ipaddr, port), RKHandlerClassFactory(cls))
    print (f'listening on address {ipaddr} and port {port}')
    server.serve_forever()
'''
list = [1, 2, 3]
dictionary = {1: "one", 2: "two", 3: "three"}
tuple = (1, 2, 3)
set = {1, 2, 3}
''' |
rafaank/rkpylib | tests/sample.py | <filename>tests/sample.py<gh_stars>0
import os
import sys
import json
import socket
import traceback
from rkpylib.rkhttp import RKHTTP
from urllib import parse
ok_response_text = 'HTTP/1.1 200 OK\n\n'
@RKHTTP.route('/hello')
def hello(globals, request, response):
    """Minimal handler: replies 200 and writes the canned OK body."""
    response.send_response(200)
    response.send_header('Content-Type', 'text/html')
    response.end_headers()
    body = ok_response_text.encode("utf-8")
    response.wfile.write(body)
@RKHTTP.route('/sample')
def sample(globals, request, response):
    """Demonstration handler for RKHTTP.

    Echoes back, as JSON, the parsed request details (path, query params,
    headers, POST body) together with values from the shared ``globals``
    store.  See the rkhttp module docstring for the full description of the
    ``globals``/``request``/``response`` objects and their helper methods.
    """
    globals.inc("counter")
    response.send_response(200)
    response.send_header('Content-Type', 'application/json')
    response.end_headers()
    post_data = None
    if request.command == "POST":
        try:
            # post_data = parse.parse_qs(post_data) //Can be used to parse other formats like form-data
            #print(f'Content Length = {int(request.headers["Content-Length"])}')
            post_data = request.post_data
            # When the server is configured NOT to pre-parse POST bodies,
            # decode the raw body here according to its content type.
            if not globals._config['parse_post_data']:
                ctype = request.headers['content-type']
                if ctype:
                    if ctype == 'application/json':
                        content_length = int(request.headers['Content-Length']) # <--- Gets the size of data
                        post_data = request.rfile.read(content_length) # <--- Gets the data itself
                        post_data = json.loads(post_data.decode('utf-8'))
                    elif ctype.startswith('multipart/form-data'):
                        # Fixed: ``cgi`` was never imported in this module, so
                        # this branch raised NameError (reported as a 500 by
                        # the generic handler below).
                        import cgi
                        # boundary data needs to be encoded in a binary format
                        ctype, pdict = cgi.parse_header(request.headers['content-type'])
                        pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
                        post_data = cgi.parse_multipart(request.rfile, pdict)
                    elif ctype == 'application/x-www-form-urlencoded':
                        content_length = int(request.headers['Content-Length']) # <--- Gets the size of data
                        post_data = request.rfile.read(content_length) # <--- Gets the data itself
                        post_data = parse.parse_qs(post_data.decode('utf-8'))
        except json.decoder.JSONDecodeError as je:
            response.send_error(500, str(je), traceback.format_exc())
            return
        except Exception as e:
            response.send_error(500, 'Internal Server Error - ' + str(e))
            return
    resp_json = dict()
    resp_json['code'] = 200
    resp_json['data'] = dict()
    resp_json['data']['os.getpid()'] = os.getpid()
    resp_json['data']['request.parsed_path.path'] = request.parsed_path.path
    resp_json['data']['request.parsed_path.query'] = request.parsed_path.query
    resp_json['data']['request.url_params'] = request.url_params
    resp_json['data']['request.url_paramsd'] = request.url_paramsd
    resp_json['data']['request.url_paramsl'] = request.url_paramsl
    resp_json['data']['request.headers'] = str(request.headers)
    resp_json['data']['command'] = request.command
    resp_json['data']['request.post_data'] = post_data
    resp_json['data']['globals._nof-request'] = globals._nof_requests
    resp_json['data']['globals.get("counter")'] = globals.get("counter")
    response_text = json.dumps(resp_json)
    response.wfile.write(response_text.encode("utf-8"))
if __name__ == '__main__':
    # Stand-alone entry point: serve the routes defined above forever.
    ipaddr, port = socket.gethostname(), 8282
    srv = RKHTTP.server((ipaddr, port), "sample_app", "/var/log/rkhttp.log")
    srv.globals.register('counter', 0)
    print(f'listening on address {ipaddr} and port {port}')
    srv.serve_forever()
|
rafaank/rkpylib | rkpylib/rkdatasource.py | <filename>rkpylib/rkdatasource.py
from pymongo import MongoClient
from threading import Lock
class RKDataSource():
    """MongoDB connection wrapper carrying its own Lock, so instances can be
    handed out thread-safely from a connection pool."""

    def __init__(self, **kwargs):
        # Required keyword arguments: database, server, port.
        db_name = kwargs['database']
        self.database = db_name
        self.client = MongoClient(kwargs['server'], kwargs['port'])
        self.db = self.client[db_name]
        self.lock = Lock()
'''
def client(self):
return self._client
def collection(self, col=None):
if col:
self._collection = self.db[col]
return self._collection
def find(self, json):
return self._collection.find(json)
def find_one(self, json):
return self._collection.find_one(json)
'''
|
rafaank/rkpylib | rkpylib/rkclusterlock_sample.py | <reponame>rafaank/rkpylib<gh_stars>0
import time
from threading import Thread, current_thread
from rkclusterlock import RKClusterLock
def runClient():
    """Demo worker: loop forever acquiring the cluster lock, incrementing the
    string counter stored with it, and releasing it again."""
    #rlock = RKClusterLock('localhost', 9191,'FKAPP')
    rlock = RKClusterLock('172.16.58.3', 9191,'FKAPP')
    cur_thread = current_thread()
    while 1:
        data = ""
        resp, data = rlock.acquire(wait=True, acquire_wait_time=5, max_release_time=5)
        if resp:
            try:
                print(f"Got Lock for thread {cur_thread.name} with data {data}")
                # The payload is a stringified counter; bump it while holding
                # the lock, restarting at "1" when it is absent or malformed.
                if data:
                    try:
                        int_data = int(data)
                        int_data = int_data + 1
                        data = str(int_data)
                    except ValueError:
                        # Fixed: narrowed the original bare ``except`` to the
                        # conversion error it actually guards against.
                        data = "1"
                else:
                    data = "1"
                #time.sleep(10)
            finally:
                rlock.release(data)
        else:
            print(f"Failed to get Lock for thread {cur_thread.name}")
            time.sleep(1)
"""
print("Lets start some clients now")
rlock = RKClusterLock('172.16.58.3', 9191,'FKAPP')
data = ""
resp, data = rlock.acquire(wait=True, acquire_wait_time=5, max_release_time=5)
print(data)
if data:
try:
int_data = int(data)
int_data = int_data + 1
data = str(int_data)
except:
data = "1"
else:
data = "1"
rlock.release(str(data))
"""
# Spawn 200 non-daemon demo clients that contend for the same cluster lock.
for i in range(200):
    print(f"Creating Client {i}")
    client = Thread(target=runClient)
    client.daemon = False
    client.start()
|
rafaank/rkpylib | rkpylib/rkutils.py | <gh_stars>0
import threading
import linecache
import os
import tracemalloc
class RKDict(dict):
    """Dictionary with dot.notation access to its items: ``d.key`` reads,
    writes and deletes the corresponding entry; missing keys read as None."""

    def __getattr__(self, name):
        # Mirrors dict.get: missing keys resolve to None instead of raising.
        return self.get(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
def isdefined(x):
    # NOTE(review): this is effectively a no-op -- ``x`` is a bound parameter,
    # so the NameError branch can never trigger, nothing is returned, and the
    # local rebinding of ``x`` is discarded.  Looks like dead or experimental
    # code; confirm intent before relying on it.
    try: x
    except NameError: x = None
def setInterval(interval):
    """Decorator factory: run the decorated function in a daemon thread every
    *interval* seconds.

    Calling the wrapped function starts the timer and returns a
    threading.Event; call ``.set()`` on it to stop the loop.
    """
    def decorator(function):
        from functools import wraps  # fixed: preserve the wrapped function's metadata

        @wraps(function)
        def wrapper(*args, **kwargs):
            stopped = threading.Event()

            def loop():  # executed in another thread
                while not stopped.wait(interval):  # until stopped
                    function(*args, **kwargs)

            t = threading.Thread(target=loop)
            t.daemon = True  # stop if the program exits
            t.start()
            return stopped
        return wrapper
    return decorator
def traceback_memory_leaks(limit=50):
    """Print the *limit* largest allocation sites, with full tracebacks, from
    the current tracemalloc snapshot (tracemalloc.start() must be active)."""
    trace_filters = (
        tracemalloc.Filter(True, "fkhttp.py"),
        tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
        tracemalloc.Filter(False, "<frozen importlib._bootstrap_external>"),
        tracemalloc.Filter(False, threading.__file__),
        tracemalloc.Filter(False, "<unknown>"),
    )
    stats = tracemalloc.take_snapshot().filter_traces(trace_filters).statistics('traceback')
    for rank, stat in enumerate(stats[:limit], 1):
        print("#%s: %s memory blocks: %.1f KiB" % (rank, stat.count, stat.size / 1024))
        for frame_line in stat.traceback.format():
            print(frame_line)
def trace_memory_leaks(key_type='lineno', limit=50):
    """Print a summary of current allocations grouped by *key_type*, listing
    the top *limit* entries plus aggregate totals (requires tracemalloc.start())."""
    filters = (
        # tracemalloc.Filter(True, "fkhttp.py"),
        tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
        tracemalloc.Filter(False, "<frozen importlib._bootstrap_external>"),
        tracemalloc.Filter(False, threading.__file__),
        tracemalloc.Filter(False, "<unknown>"),
    )
    stats = tracemalloc.take_snapshot().filter_traces(filters).statistics(key_type)
    print("Total Leak size: %.1f KiB" % (sum(s.size for s in stats) / 1024))
    print("Top %s lines" % limit)
    for rank, stat in enumerate(stats[:limit], 1):
        frame = stat.traceback[0]
        print(frame.filename)
        # replace "/path/to/module/file.py" with "module/file.py"
        short_name = os.sep.join(frame.filename.split(os.sep)[-2:])
        print("#%s: %s:%s: %.1f KiB"
              % (rank, short_name, frame.lineno, stat.size / 1024))
        source_line = linecache.getline(frame.filename, frame.lineno).strip()
        if source_line:
            print('    %s' % source_line)
    remainder = stats[limit:]
    if remainder:
        print("%s other: %.1f KiB" % (len(remainder), sum(s.size for s in remainder) / 1024))
    print("Total allocated size: %.1f KiB" % (sum(s.size for s in stats) / 1024))
|
rafaank/rkpylib | rkpylib/rkhttp.py | <filename>rkpylib/rkhttp.py
"""RKHTTPServer
A single-core multi-threaded HTTPServer that enables sharing of data
and information amongst http requests using a global instance of type RKHTTPGlobals.
RKHTTPGlobals
A thread-safe storage class, that makes it easy to store and retrieve variables
that can be shared amongst requests within a process
Global Functions
globals.register(self, var_name, var_value):
Registers a new variable in the global scope, this variable is accessible
and shares the same value across all threads within the RKHttp instance scope.
globals.unregister(self, var_name):
Unregisters a variable from the global scope
globals.get(var_name):
Returns the values for a global variable
globals.set(var_name, var_value):
Updates value of a global variable.
globals.inc(self, var_name, inc_val = 1):
Increments the value of a global variable with inc_val. You can use negative integers to decrement a value.
Request Variables
request.client_address:
Contains a tuple of the form (host, port) referring to the client’s address.
request.server:
Contains the server instance
request.close_connection:
Boolean that should be set before handle_one_request() returns,
indicating if another request may be expected, or if the connection should be shut down.
request.requestline:
Contains the string representation of the HTTP request line. The terminating CRLF is stripped.
This attribute should be set by handle_one_request(). If no valid request line was processed,
it should be set to the empty string
request.command:
Contains the command (request type). For example, 'GET'
request.path:
Contains the request path
request.request_version:
Contains the version string from the request. For example, 'HTTP/1.0'.
request.parsed_path:
Contains ParseResult object which is retrieved after processing the url through the parse.urlparse(path) function
request.url_params:
Url query params processed into a dictionary for ready to access
request.url_paramsl:
Url query params parsed as a list, Data are returned as a list of name, value pairs
request.url_paramsd:
Url query params processed into a dictionary, The dictionary keys are the unique
query variable names and the values are lists of values for each name.
request.post_data:
Post request data,
request.headers:
Extends access to the request headers dictionary.
request.command:
Contains the command (request type). For example, 'GET' or 'POST'. All other types are unsupported
request.rfile:
Reference to io.BufferedIOBase input stream of BaseHTTPHandler, ready to read from
the start of the optional input data. This should ideally be not required as all
the data is already read and processed in easily readable variables
response.wfile:
Reference to the io.BufferedIOBase output stream of BaseHTTPHandler for writing a
response back to the client. Proper adherence to the HTTP protocol must be used
when writing to this stream in order to achieve successful interoperation with HTTP clients
Response Functions
response.send_response(code, message=None):
Reference to the send_response function of BaseHTTPHandler. Adds a response header
to the headers buffer and logs the accepted request. The HTTP response line is
written to the internal buffer, followed by Server and Date headers. The values for
these two headers are picked up from the version_string() and date_time_string() methods,
respectively. If the server does not intend to send any other headers using the
send_header() method, then send_response() should be followed by an end_headers() call.
response.send_error(code, message=None):
Reference to the send_error function of BaseHTTPHandler. Adds a response header
to the headers buffer and logs the accepted request. The HTTP response line is written
to the internal buffer, followed by Server and Date headers. The values for these
two headers are picked up from the version_string() and date_time_string() methods,
respectively. If the server does not intend to send any other headers using the send_header()
method, then send_response() should be followed by an end_headers() call.
response.send_header(keyword, value):
Reference to the send_header function of BaseHTTPHandler. Adds the HTTP header to an internal
buffer which will be written to the output stream when either end_headers() or flush_headers()
is invoked. keyword should specify the header keyword, with value specifying its value. Note that,
after the send_header calls are done, end_headers() MUST BE called in order to complete the operation
response.end_headers():
Reference to the end_headers function of BaseHTTPHandler. Adds a blank line (indicating the end
of the HTTP headers in the response) to the headers buffer and calls flush_headers().
response.send_exception(code, message=None, exception=None):
Function to send exception information back to the client as a formatted JSON
response.send_json_response(self, code, resp_json):
Function to send JSON response back to the client, this is a wrapper function which wraps the below commonly used code
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
response_text = json.dumps(resp_json, default=json_util.default)
self.wfile.write(response_text.encode("utf-8"))
"""
import errno
import importlib
import socket
import sys
import time
import traceback
import cgi
from http.server import BaseHTTPRequestHandler, HTTPServer
from http.cookies import SimpleCookie as cookie
from urllib import parse
from socketserver import ThreadingMixIn
from threading import Thread, Lock
from .rkutils import RKDict
import logging
import json
import jwt
from bson import json_util
from uuid import uuid1
from time import time
# import importlib
# import importlib.util
global server
class RKHTTPGlobals():
def __init__(self):
    """Creates a new instance of RKHTTPGlobals object that will be used to bind with RKHTTPServer instance."""
    self._nof_requests = 0                    # request counter shared across handlers
    self._variables = dict()                  # shared name -> value store
    self._lock = Lock()                       # guards every access to _variables
    self._error = None                        # last error, if any (presumably set elsewhere -- TODO confirm)
    self._config = dict()                     # server-level configuration flags
    self._config['parse_post_data'] = False   # when True the server pre-parses POST bodies -- TODO confirm
    self._logger = None                       # logging.Logger; attached later by the server -- TODO confirm
def __del__(self):
"""Destructor to destroy the RKHTTPGlobals object instance and all child objects."""
if self._lock.acquire(True, 10):
try:
for key in self._variables:
del self._variables[key]
except:
return False
finally:
self._lock.release()
del self._variables
del self._lock
def register(self, var_name, var_value):
"""Registers a new global variable."""
if self._lock.acquire(True, 1):
try:
if var_name in self._variables:
# Variable already exists
return False
else:
self._variables[var_name] = var_value
self._logger.debug(f'Registered new variable {var_name}')
return True
except Exception as e:
self._logger.exception(str(e))
return False
finally:
self._lock.release()
else:
# failed to get lock
return False
def unregister(self, var_name):
"""Unregisters a global variable."""
if self._lock.acquire(True, 1):
try:
if var_name in self._variables:
del self._variables[var_name]
return True
else:
self._logger.debug(f"Variable {var_name} not found in globals")
return False
except Exception as e:
self._logger.exception(str(e))
return False
finally:
self._lock.release()
#def __getattr__(self, attr):
# return self.get(var_name)
def get(self, var_name):
"""Gets value of a registered global variable, returns variable value if the variable exists and value is successfully fetched else returns None."""
if self._lock.acquire(True, 1):
try:
if var_name in self._variables:
return self._variables[var_name]
else:
self._logger.debug(f"Variable {var_name} not found in globals")
return None
except Exception as e:
self._logger.exception(str(e))
return None
finally:
self._lock.release()
else:
self._logger.debug("Failed to get lock in globals.get")
return None
def set(self, var_name, var_value):
"""Sets value of a registered global variable, returns True if variable value is set else returns False if the variable does not exists or if failed to set the value."""
if self._lock.acquire(True, 1):
try:
if not var_name in self._variables:
self._logger.debug(f"Variable {var_name} not found in globals")
return False
else:
self._variables[var_name] = var_value
return True
except Exception as e:
self._logger.exception(str(e))
return False
finally:
self._lock.release()
else:
# failed to get lock
self._logger.debug("Failed to get lock in globals.set")
return False
def inc(self, var_name, inc_val = 1):
"""Increments a registered variables value by <inc_val>, returns True if value is incremented sucessfully else returns False if failed to set the value."""
if self._lock.acquire(True, 1):
try:
if var_name in self._variables:
if type(self._variables[var_name]) is int:
self._variables[var_name] += inc_val
return True
else:
return False
else:
return False
except Exception as e:
self._logger.exception(str(e))
return False
finally:
self._lock.release()
else:
return False
def RKHTTPHandlerClassFactory(globals):
    """Class factory to customize the initialization of RKHTTPRequestHandler object with additional global parameter.

    The nested handler class closes over *globals* (an RKHTTPGlobals
    instance) so every handler the server spawns shares the same state.
    """
    class RKHTTPRequestHandler(BaseHTTPRequestHandler):
        """Class RKHTTPRequestHandler derived from BaseHTTPRequestHandler, a subclass to handle all HTTP requests."""
        # Class attribute: shared across ALL handler instances.
        # Maps session id -> creation timestamp (seconds since epoch).
        sessioncookies = dict()
        def __init__(self, *args, **kwargs):
            """Constructor of RKHTTPRequestHandler, initializes the handler and binds the HTTPGlobals object to the handler."""
            self.globals = globals
            # NOTE(review): incremented without holding globals._lock, so
            # concurrent requests may lose counts — confirm if exactness matters.
            self.globals._nof_requests += 1
            self.sessionidmorsel = None
            # Must run last: the base __init__ dispatches the request.
            super(RKHTTPRequestHandler, self).__init__(*args, **kwargs)
        def __del__(self):
            """Destructor for the RKHTTPRequestHandler object."""
            #super(RKHTTPRequestHandler, self).__del__()
            pass
        def _session_cookie(self, forcenew=False):
            # Parse the client's cookies and (re)issue a session_id cookie.
            # 'time' here is time.time (the module does 'from time import time',
            # shadowing the earlier 'import time').
            cookiestring = "\n".join(self.headers.get_all('Cookie',failobj=[]))
            c = cookie()
            c.load(cookiestring)
            try:
                # NOTE(review): 'stored - now' can never exceed 3600 because
                # stored <= now, so this expiry check only trips via forcenew
                # or a missing session_id (KeyError). Looks like the operands
                # are inverted (time() - stored) — confirm intent.
                if forcenew or self.sessioncookies[c['session_id'].value]-time() > 3600:
                    raise ValueError('new cookie needed')
            except:
                c['session_id']=uuid1().hex
            for m in c:
                if m=='session_id':
                    # Record/refresh the session and remember the morsel that
                    # do_preprocess() will emit as a Set-Cookie header.
                    self.sessioncookies[c[m].value] = time()
                    c[m]["httponly"] = True
                    c[m]["max-age"] = 3600
                    c[m]["expires"] = self.date_time_string(time()+3600)
                    self.sessionidmorsel = c[m]
                    break
        def do_preprocess(self):
            """Preprocess a request by initializing all request and response parameters that can be used to do the processing of a GET or POST request.

            Returns True when the request may proceed, False when an error
            response has already been sent to the client.
            """
            # A server-level startup error short-circuits every request.
            err = self.globals._error
            if isinstance(err, Exception) :
                #self.send_error(200, str(err), traceback.print_tb(err.__traceback__))
                self.send_exception(500, str(err), err)
                return False
            self._session_cookie()
            if not (self.sessionidmorsel is None):
                # NOTE(review): send_header() is called here before any
                # send_response(); BaseHTTPRequestHandler buffers headers, so
                # the status line written later by the route function ends up
                # after this header — confirm the ordering is intended.
                self.send_header('Set-Cookie',self.sessionidmorsel.OutputString())
            try:
                # Build the simplified request object handed to route functions.
                self.request = RKDict()
                self.request.client_address = self.client_address
                self.request.server = self.server
                self.request.close_connection = self.close_connection
                self.request.requestline = self.requestline
                self.request.command = self.command
                self.request.path = self.path
                self.request.content_type = None
                self.request.request_version = self.request_version
                self.request.parsed_path = parse.urlparse(self.path)
                # Query string exposed three ways: list of pairs, dict of
                # lists, and flat dict (last value wins).
                self.request.url_paramsl = parse.parse_qsl(self.request.parsed_path.query)
                self.request.url_paramsd = parse.parse_qs(self.request.parsed_path.query)
                self.request.url_params = dict(parse.parse_qsl(self.request.parsed_path.query))
                self.request.headers = self.headers
                self.request.rfile = self.rfile
                self.request.post_data = None
                # Build the response object exposing the raw send_* helpers.
                self.response = RKDict()
                self.response.wfile = self.wfile
                self.response.send_response = self.send_response
                self.response.send_error = self.send_error
                self.response.send_header = self.send_header
                self.response.end_headers = self.end_headers
                self.response.send_exception = self.send_exception
                self.response.send_json_response = self.send_json_response
                # Resolve the route function registered for this URL path.
                self.function = RKHTTP._route_function(self.request.parsed_path.path)
                if not self.function:
                    self.send_error(404, 'Not Found - ' + self.request.parsed_path.path )
                    return False
                return True
            except Exception as e:
                #self.send_error(500, str(e), traceback.format_exc())
                self.send_exception(500, str(e), e)
                return False
        def do_GET(self):
            """GET request handler, calls the route function attached to the url_path."""
            if self.do_preprocess():
                try:
                    self.globals._logger.debug(f'Executing function {self.request.parsed_path.path}')
                    self.function(self.globals, self.request, self.response)
                    self.globals._logger.debug(f'Completed function {self.request.parsed_path.path}')
                except BrokenPipeError as bpe:
                    # Client disconnected mid-response; nothing more to send.
                    self.globals._logger.exception(str(bpe))
                except Exception as e:
                    try:
                        #self.send_error(500, str(e), traceback.format_exc())
                        self.send_exception(500, str(e), e)
                    except Exception as e:
                        self.globals._logger.exception(str(e))
        def do_POST(self):
            """GET request handler, initalizes the post data and makes it available as a request variable. Later calls the route function attached to the url_path."""
            if self.do_preprocess():
                try:
                    # Optionally parse the body into request.post_data based on
                    # Content-Type (JSON, multipart, or urlencoded form).
                    if (self.globals._config['parse_post_data']):
                        post_data = None
                        ctype = self.headers['content-type']
                        if ctype:
                            if ctype == 'application/json':
                                content_length = int(self.headers['Content-Length'])  # <--- Gets the size of data
                                post_data = self.rfile.read(content_length)  # <--- Gets the data itself
                                if post_data:
                                    post_data = json.loads(post_data.decode('utf-8'))
                            elif ctype.startswith('multipart/form-data'):
                                # boundary data needs to be encoded in a binary format
                                ctype, pdict = cgi.parse_header(self.headers['content-type'])
                                pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
                                post_data = cgi.parse_multipart(self.rfile, pdict)
                            elif ctype == 'application/x-www-form-urlencoded':
                                content_length = int(self.headers['Content-Length'])  # <--- Gets the size of data
                                post_data = self.rfile.read(content_length)  # <--- Gets the data itself
                                post_data = parse.parse_qs(post_data.decode('utf-8'))
                        self.request.post_data = post_data
                        self.request.content_type = ctype
                except Exception as e:
                    # NOTE(review): after sending this exception response the
                    # method falls through and still invokes the route
                    # function below — confirm a 'return' isn't missing here.
                    self.send_exception(500, f"{str(e)} - An Exception occured trying to read post_data, if you think your post data is correct, then try reading it directly from rfile by setting the global._config['parse_post_data']=False", e)
                try:
                    self.globals._logger.debug(f'Executing function {self.request.parsed_path.path}')
                    self.function(self.globals, self.request, self.response)
                    self.globals._logger.debug(f'Completed function {self.request.parsed_path.path}')
                except BrokenPipeError as bpe:
                    self.globals._logger.exception(str(bpe))
                except Exception as e:
                    #self.send_error(500, str(e), traceback.format_exc())
                    self.send_exception(500, str(e), e)
        def log_message(self, format, *args):
            """Override to the default log_message to write all logs to our logger"""
            self.globals._logger.debug(format, *args)
        def log_error(self, format, *args):
            """Override to the default log_message to write all logs to our logger"""
            self.globals._logger.exception(format, *args)
        def log_response_text(self, format, *args):
            """Override to the default log_message to write all logs to our logger"""
            self.globals._logger.debug(format, *args)
        def handle_default(self, request, response):
            """Function to handle all default treated requests, will be deprecated in future"""
            self.send_response(200)
            self.send_header('Content-Type', 'text/plain; charset=utf-8')
            self.end_headers()
            self.wfile.write('RKHTTP is active...'.encode('utf-8'))
        def send_exception(self, code, message=None, exception=None):
            """ Returns a json formatted and easy readable exception information with traceback information."""
            # NOTE(review): the HTTP status is always 200; the logical 'code'
            # travels only in the JSON body — confirm this is deliberate.
            self.send_response(200)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
            resp_json = dict()
            resp_json['code'] = code
            resp_json['message'] = message
            if isinstance(exception, Exception):
                #exc_type, exc_value, exc_traceback = sys.exc_info()
                #trace_list = traceback.extract_tb(exc_traceback)
                # Render each traceback frame as a compact one-line string.
                trace_list = traceback.extract_tb(exception.__traceback__)
                new_trace_list = list()
                for idx, val in enumerate(trace_list):
                    new_trace_list.append(repr(val).replace('<FrameSummary ', '', 1)[:-1])
                resp_json['exception'] = new_trace_list
            else:
                resp_json['exception'] = exception
            response_text = json.dumps(resp_json, default=json_util.default)
            try:
                self.wfile.write(response_text.encode("utf-8"))
            except Exception as e:
                self.globals._logger.exception(str(e))
        def send_json_response(self, code, resp_json):
            """Serialize *resp_json* (bson-aware) and send it to the client.

            NOTE(review): the 'code' parameter is ignored — the HTTP status is
            always 200, matching the module docstring's example; confirm.
            """
            self.send_response(200)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
            response_text = json.dumps(resp_json, default=json_util.default)
            self.wfile.write(response_text.encode("utf-8"))
    # Hand the per-server customized handler class back to the caller.
    return RKHTTPRequestHandler
class RKHTTPServer(ThreadingMixIn, HTTPServer):
    """Multi-threaded HTTP server: ThreadingMixIn serves each request in its own thread."""
    # Shared RKHTTPGlobals instance; assigned by RKHTTP.server() after construction.
    globals = None
class RKHTTP():
    """Wrapper class to initialize a RKHTTPServer instance. Also manages all route_paths and route_functions"""

    # Maps url path -> handler function; populated by the @RKHTTP.route decorator.
    _routes = dict()

    @classmethod
    def route(cls, route_str):
        """Decorator to bind url path *route_str* to a function.

        The decorated function is returned unchanged so it remains directly
        callable.
        """
        def decorator(f):
            cls._routes[route_str] = f
            return f
        return decorator

    @classmethod
    def _route_function(cls, url_path):
        """Returns the function associated with the given url_path, or None when no route matches."""
        # Idiom fix: return the dict lookup directly instead of via a temp.
        return cls._routes.get(url_path)

    @classmethod
    def server(cls, ip_port, app_name, log_file, log_level=logging.DEBUG):
        """Returns a new instance of a single core multi-threaded HTTP Server.

        Args:
            ip_port: (host, port) tuple the server binds to.
            app_name: logger name for the application.
            log_file: path of the log file to append to.
            log_level: logging level applied to the logger and its file
                handler (new parameter; defaults to logging.DEBUG, matching
                the previous hard-coded behavior).
        """
        logger = logging.getLogger(app_name)
        logger.setLevel(log_level)
        fh = logging.FileHandler(log_file)
        fh.setLevel(log_level)
        log_format = '%(asctime)-15s - %(name)s - %(levelname)-8s - %(message)s'
        formatter = logging.Formatter(log_format)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        # One globals object per server, shared by every request handler.
        globals = RKHTTPGlobals()
        globals._logger = logger
        s = RKHTTPServer(ip_port, RKHTTPHandlerClassFactory(globals))
        s.globals = globals
        return s
if __name__ == "__main__":
    # Smoke test: register a single health-check route and serve forever.
    @RKHTTP.route('/rkhttp.test')
    def rkhttp_test_function(globals, request, response):
        """Health-check endpoint reporting the total number of requests served."""
        resp_json = dict()
        resp_json['code'] = 200
        resp_json['total_requests'] = globals._nof_requests
        resp_json['data'] = dict()
        resp_json['data']['value'] = 'RKHTTP is running'
        response.send_json_response(200, resp_json)
    port = 9786
    ipaddr = '0.0.0.0'  # bind on all interfaces
    server = RKHTTP.server((ipaddr, port), "rkhttp", "/var/log/rkhttp.log")
    # Enable automatic POST body parsing (JSON/multipart/urlencoded).
    server.globals._config['parse_post_data'] = True
    server.serve_forever()
|
sffxzzp/SteamChineseChecker | cn.py | <filename>cn.py
#!/bin/python3
# -*- coding: UTF-8 -*-
import json, urllib, requests, math
from bs4 import BeautifulSoup
def main():
    """Scrape Steam curator 31318556's recommendations and rebuild data.json.

    data.json maps appid -> {'description': ..., 'link': ...}. Links wrapped
    by Steam's community linkfilter are unwrapped, and steamcn.com links are
    rewritten to keylol.com (the forum's new domain).
    """
    # Single URL template instead of two near-identical literals.
    url_tmpl = ('https://store.steampowered.com/curator/31318556/'
                'ajaxgetfilteredrecommendations/render/'
                '?query=&start=%s&count=%s&tagids=&sort=recent'
                '&app_types=&curations=&reset=false')
    # Fix: the original leaked the file handle (open(...).read() with no
    # close); use context managers for deterministic cleanup.
    with open('data.json', 'r', encoding='utf-8') as f:
        oricount = len(json.load(f))
    maxcount = json.loads(requests.get(url_tmpl % (0, 0)).text)['total_count']
    print('Old data: %s\tNew Data: %s' % (oricount, maxcount))
    out = {}
    # Fetch recommendations 500 at a time (the endpoint's page size).
    for page in range(0, math.ceil(maxcount / 500)):
        cont = json.loads(requests.get(url_tmpl % (500 * page, 500)).text)['results_html']
        soup = BeautifulSoup(cont, 'html.parser')
        for game in soup.select('div.recommendation'):
            appid = game.select_one('a.store_capsule').get('data-ds-appid')
            desc = game.select_one('div.recommendation_desc').text.replace('"', '').strip()
            link = game.select_one('div.recommendation_readmore > a[target=_blank]')
            if link is None:  # fix: identity comparison with None, not '=='
                link = ''
            else:
                link = link.get('href').replace('https://steamcommunity.com/linkfilter/?url=', '')
                if 'steamcn.com' in link:
                    link = link.replace('steamcn.com', 'keylol.com')
            out[appid] = {'description': desc, 'link': link}
    with open('data.json', 'w', encoding='utf-8') as f:
        f.write(json.dumps(out, ensure_ascii=False))
if __name__ == '__main__':
    # Run the scraper when executed as a script.
    main()
|
prman-pixar/RenderManForBlender | rman_translators/rman_openvdb_translator.py | from .rman_translator import RmanTranslator
from ..rman_sg_nodes.rman_sg_openvdb import RmanSgOpenVDB
from ..rfb_utils import filepath_utils
from ..rfb_utils import transform_utils
from ..rfb_utils import string_utils
from ..rfb_utils import scenegraph_utils
from ..rfb_utils.envconfig_utils import envconfig
from ..rfb_logger import rfb_log
import json
class RmanOpenVDBTranslator(RmanTranslator):
    """Translator for Blender VOLUME objects: exports OpenVDB volumes into the RenderMan scene graph."""

    def __init__(self, rman_scene):
        super().__init__(rman_scene)
        self.bl_type = 'VOLUME'

    def export(self, ob, db_name):
        """Create an empty scene-graph volume node for *ob* and wrap it in an RmanSgOpenVDB."""
        sg_node = self.rman_scene.sg_scene.CreateVolume(db_name)
        sg_node.Define(0, 0, 0)
        rman_sg_openvdb = RmanSgOpenVDB(self.rman_scene, sg_node, db_name)
        return rman_sg_openvdb

    def export_object_primvars(self, ob, rman_sg_node, sg_node=None):
        """Export per-object primvars, resolving the volume micropolygon length.

        A value equal to the property's 'inherit_true_value' means "inherit
        from the scene-level rman_micropolygonlength".
        """
        if not sg_node:
            sg_node = rman_sg_node.sg_node
        if not sg_node:
            return
        super().export_object_primvars(ob, rman_sg_node, sg_node=sg_node)
        prop_name = 'rman_micropolygonlength_volume'
        rm = ob.renderman
        rm_scene = self.rman_scene.bl_scene.renderman
        meta = rm.prop_meta[prop_name]
        val = getattr(rm, prop_name)
        if float(val) == meta['inherit_true_value']:
            if hasattr(rm_scene, 'rman_micropolygonlength'):
                val = rm_scene.rman_micropolygonlength
        try:
            primvars = sg_node.GetPrimVars()
            primvars.SetFloat('dice:micropolygonlength', val)
            sg_node.SetPrimVars(primvars)
        except AttributeError:
            rfb_log().debug("Cannot get RtPrimVar for this node")

    def export_deform_sample(self, rman_sg_openvdb, ob, time_sample):
        # Volumes carry no deformation motion samples.
        pass

    def _export_as_box(self, rman_sg_openvdb, primvar):
        """Fallback: render the volume as a plain box primitive.

        Helper extracted from update() where this two-line sequence was
        repeated three times.
        """
        primvar.SetString(self.rman_scene.rman.Tokens.Rix.k_Ri_type, "box")
        rman_sg_openvdb.sg_node.SetPrimVars(primvar)

    def update(self, ob, rman_sg_openvdb):
        """(Re)build the primvars on the volume node from the Blender volume datablock *ob.data*."""
        db = ob.data
        rm = db.renderman
        primvar = rman_sg_openvdb.sg_node.GetPrimVars()
        primvar.Clear()
        bounds = transform_utils.convert_ob_bounds(ob.bound_box)
        primvar.SetFloatArray(self.rman_scene.rman.Tokens.Rix.k_Ri_Bound, string_utils.convert_val(bounds), 6)
        if db.filepath == '':
            # No VDB file assigned yet: draw the bounding box only.
            self._export_as_box(rman_sg_openvdb, primvar)
            return
        grids = db.grids
        if not grids.is_loaded:
            if not grids.load():
                rfb_log().error("Could not load grids and metadata for volume: %s" % ob.name)
                self._export_as_box(rman_sg_openvdb, primvar)
                return
        active_index = grids.active_index
        active_grid = grids[active_index]
        if active_grid.data_type not in ['FLOAT', 'DOUBLE']:
            rfb_log().error("Active grid is not of float type: %s" % ob.name)
            self._export_as_box(rman_sg_openvdb, primvar)
            return
        openvdb_file = filepath_utils.get_real_path(db.filepath)
        if db.is_sequence:
            # if we have a sequence, get the current frame filepath from the grids
            openvdb_file = filepath_utils.get_real_path(grids.frame_filepath)
        # Plugin parameters serialized as JSON for the blobby DSO.
        openvdb_attrs = dict()
        openvdb_attrs['filterWidth'] = rm.openvdb_filterwidth
        openvdb_attrs['velocityScale'] = rm.openvdb_velocityscale
        openvdb_attrs['densityMult'] = rm.openvdb_densitymult
        openvdb_attrs['densityRolloff'] = rm.openvdb_densityrolloff
        json_attrs = str(json.dumps(openvdb_attrs))
        primvar.SetString(self.rman_scene.rman.Tokens.Rix.k_Ri_type, "blobbydso:impl_openvdb")
        string_args = []
        string_args.append(openvdb_file)
        string_args.append("%s:fogvolume" % active_grid.name)
        # NOTE(review): this is a lexicographic string compare (e.g. "24.10"
        # sorts before "24.2") — confirm version strings can never make this
        # misfire.
        if envconfig().build_info.version() >= "24.2":
            # RenderMan >= 24.2 accepts an optional velocity grid argument.
            if rm.openvdb_velocity_grid_name == '__NONE__':
                string_args.append('')
            else:
                string_args.append(rm.openvdb_velocity_grid_name)
        string_args.append(json_attrs)
        primvar.SetStringArray(self.rman_scene.rman.Tokens.Rix.k_blobbydso_stringargs, string_args, len(string_args))
        # Declare a primvar for every grid so shaders can sample them.
        for i, grid in enumerate(grids):
            if grid.data_type in ['FLOAT', 'DOUBLE']:
                primvar.SetFloatDetail(grid.name, [], "varying")
            elif grid.data_type in ['VECTOR_FLOAT', 'VECTOR_DOUBLE', 'VECTOR_INT']:
                primvar.SetVectorDetail(grid.name, [], "varying")
            elif grid.data_type in ['INT', 'INT64', 'BOOLEAN']:
                primvar.SetIntegerDetail(grid.name, [], "varying")
            elif grid.data_type == 'STRING':
                primvar.SetStringDetail(grid.name, [], "uniform")
        scenegraph_utils.export_vol_aggregate(self.rman_scene.bl_scene, primvar, ob)
        primvar.SetInteger("volume:dsominmax", rm.volume_dsominmax)
        rman_sg_openvdb.sg_node.SetPrimVars(primvar)
prman-pixar/RenderManForBlender | rfb_utils/draw_utils.py | from . import shadergraph_utils
from ..rman_constants import NODE_LAYOUT_SPLIT
from .. import rman_config
from .. import rfb_icons
import bpy
import re
def draw_indented_label(layout, label, level):
    """Indent by *level* blank-icon labels, then draw *label* if one was given."""
    remaining = level
    while remaining > 0:
        layout.label(text='', icon='BLANK1')
        remaining -= 1
    if label:
        layout.label(text=label)
def get_open_close_icon(is_open=True):
    """Return the disclosure-triangle icon name for an open (down) or closed (right) section."""
    if is_open:
        return 'DISCLOSURE_TRI_DOWN'
    return 'DISCLOSURE_TRI_RIGHT'
def draw_sticky_toggle(layout, node, prop_name, output_node=None):
    """Draw the show/hide (eye icon) toggle for a parameter's sticky state.

    Only drawn when an output node is supplied, nothing is solo'd, and the
    output node is in sticky-selection mode.
    """
    if not output_node:
        return
    if output_node.solo_node_name != '':
        return
    if not output_node.is_sticky_selected():
        return
    sticky_prop = prop_name + '_sticky'
    if not hasattr(node, sticky_prop):
        return
    icon = 'HIDE_OFF' if getattr(node, sticky_prop) else 'HIDE_ON'
    layout.prop(node, sticky_prop, text='', icon=icon,
                icon_only=True, emboss=False)
def draw_dsypmeta_item(layout, node, prop_name):
    """Draw the display-driver meta data list for *node*.

    Shows the template_list with ADD/REMOVE operators, then — when a valid
    item is selected — the name/type/value editor for that item.
    """
    layout.label(text='Meta Data')
    row = layout.row()
    prop_index_nm = '%s_index' % prop_name
    row.template_list("RENDERMAN_UL_Dspy_MetaData_List", "Meta Data",
                      node, prop_name, node, prop_index_nm)
    col = row.column(align=True)
    row.context_pointer_set("node", node)
    op = col.operator('renderman.add_remove_dspymeta', icon="ADD", text="")
    op.collection = prop_name
    op.collection_index = prop_index_nm
    op.defaultname = 'key'
    op.action = 'ADD'
    col.context_pointer_set("node", node)
    op = col.operator('renderman.add_remove_dspymeta', icon="REMOVE", text="")
    op.collection = prop_name
    op.collection_index = prop_index_nm
    op.action = 'REMOVE'
    prop_index = getattr(node, prop_index_nm, None)
    # Fix: the original tested 'prop_index_nm is None', which is always False
    # (it is a string); we must bail out when the fetched *index value* is
    # missing, otherwise 'prop_index > -1' below raises TypeError.
    if prop_index is None:
        return
    prop = getattr(node, prop_name)
    if prop_index > -1 and prop_index < len(prop):
        item = prop[prop_index]
        layout.prop(item, 'name')
        layout.prop(item, 'type')
        layout.prop(item, 'value_%s' % item.type, slider=True)
def _draw_ui_from_rman_config(config_name, panel, context, layout, parent):
    """Draw every parameter of RenderMan config *config_name* that belongs to *panel*.

    Properties are read from *parent*; parameters sharing a 'page' are grouped
    under a collapsible header, and each row's enabled state reflects the
    render state (interactive render, batch render, or conditional locks).
    """
    # row_dict caches one column layout per page so parameters of the same
    # page land in the same column; 'default' holds page-less parameters.
    row_dict = dict()
    row = layout.row(align=True)
    col = row.column(align=True)
    row_dict['default'] = col
    rmcfg = rman_config.__RMAN_CONFIG__.get(config_name, None)
    is_rman_interactive_running = context.scene.renderman.is_rman_interactive_running
    is_rman_running = context.scene.renderman.is_rman_running
    curr_col = col
    for param_name, ndp in rmcfg.params.items():
        if ndp.panel == panel:
            # Skip parameters the parent object does not actually expose.
            if not hasattr(parent, ndp.name):
                continue
            has_page = False
            page_prop = ''
            page_open = False
            page_name = ''
            editable = getattr(ndp, 'editable', False)
            is_enabled = True
            if hasattr(ndp, 'page') and ndp.page != '':
                # '<page>_uio' on the parent stores the open/closed state.
                page_prop = ndp.page + "_uio"
                page_open = getattr(parent, page_prop, False)
                page_name = ndp.page
                has_page = True
            if has_page:
                # check if we've already drawn page with arrow
                if page_name not in row_dict:
                    row = layout.row(align=True)
                    icon = get_open_close_icon(page_open)
                    row.context_pointer_set("node", parent)
                    op = row.operator('node.rman_open_close_page', text='', icon=icon, emboss=False)
                    op.prop_name = page_prop
                    row.label(text=page_name)
                    row = layout.row(align=True)
                    col = row.column()
                    row_dict[page_name] = col
                    curr_col = col
                else:
                    curr_col = row_dict[page_name]
            else:
                curr_col = row_dict['default']
            conditionalVisOps = getattr(ndp, 'conditionalVisOps', None)
            if conditionalVisOps:
                # check if the conditionalVisOp to see if we're disabled
                expr = conditionalVisOps.get('expr', None)
                # 'node' is bound because the config expressions reference it.
                node = parent
                # NOTE(review): eval() of a config-supplied expression —
                # safe only as long as config files are trusted input.
                if expr and not eval(expr):
                    # conditionalLockOps disable the prop rather
                    # than hide them
                    if not hasattr(ndp, 'conditionalLockOps'):
                        continue
                    else:
                        is_enabled = False
            label = ndp.label if hasattr(ndp, 'label') else ndp.name
            row = curr_col.row()
            widget = getattr(ndp, 'widget', '')
            options = getattr(ndp, 'options', None)
            if ndp.is_array():
                # Array parameter: collapsible header plus a size field and
                # one row per element.
                if has_page:
                    if not page_open:
                        continue
                    row.label(text='', icon='BLANK1')
                ui_prop = param_name + "_uio"
                ui_open = getattr(parent, ui_prop)
                icon = get_open_close_icon(ui_open)
                row.context_pointer_set("node", parent)
                op = row.operator('node.rman_open_close_page', text='', icon=icon, emboss=False)
                op.prop_name = ui_prop
                prop = getattr(parent, param_name)
                prop_meta = node.prop_meta[param_name]
                sub_prop_names = list(prop)
                arraylen_nm = '%s_arraylen' % param_name
                arraylen = getattr(parent, arraylen_nm)
                prop_label = prop_meta.get('label', param_name)
                row.label(text=prop_label + ' [%d]:' % arraylen)
                if ui_open:
                    row2 = curr_col.row()
                    col = row2.column()
                    row3 = col.row()
                    row3.label(text='', icon='BLANK1')
                    row3.prop(parent, arraylen_nm, text='Size')
                    for i in range(0, arraylen):
                        row4 = col.row()
                        row4.label(text='', icon='BLANK1')
                        row4.label(text='%s[%d]' % (prop_label, i))
                        row4.prop(parent, '%s[%d]' % (param_name, i), text='')
            elif widget == 'propSearch' and options:
                # use a prop_search layout
                prop_search_parent = options.get('prop_parent')
                prop_search_name = options.get('prop_name')
                if has_page:
                    row.label(text='', icon='BLANK1')
                # NOTE(review): eval() builds the prop_search call because
                # 'prop_parent' is a dotted path string from the config.
                eval(f'row.prop_search(parent, ndp.name, {prop_search_parent}, "{prop_search_name}", text=label)')
            else:
                if has_page:
                    if not page_open:
                        continue
                    row.label(text='', icon='BLANK1')
                row.prop(parent, ndp.name, text=label)
            # Enabled state: during IPR only explicitly editable params are
            # live; during a batch render everything is locked.
            if is_rman_interactive_running:
                row.enabled = editable
            elif is_rman_running:
                row.enabled = False
            else:
                row.enabled = is_enabled
def draw_prop(node, prop_name, layout, level=0, nt=None, context=None, sticky=False):
    """Draw a single RenderMan node property into *layout*.

    Handles every widget flavor: OSL code switches, color/float ramps,
    display metadata, pages, arrays, linked sockets (recursing into the
    upstream node), prop_search widgets, read-only fields and texture file
    inputs. *level* controls indentation; *sticky* truncates drawing for the
    sticky-parameter summary view.
    """
    if prop_name == "codetypeswitch":
        # OSL source selector: internal text datablock vs external file.
        row = layout.row()
        if node.codetypeswitch == 'INT':
            row.prop_search(node, "internalSearch",
                            bpy.data, "texts", text="")
        elif node.codetypeswitch == 'EXT':
            row.prop(node, "shadercode")
    elif prop_name == "internalSearch" or prop_name == "shadercode" or prop_name == "expression":
        # Drawn by the codetypeswitch branch above; never directly.
        return
    else:
        prop_meta = node.prop_meta[prop_name]
        prop = getattr(node, prop_name, None)
        if prop is None:
            return
        read_only = prop_meta.get('readOnly', False)
        not_connectable = prop_meta.get('__noconnection', True)
        widget = prop_meta.get('widget', 'default')
        prop_hidden = getattr(node, '%s_hidden' % prop_name, False)
        prop_disabled = getattr(node, '%s_disabled' % prop_name, False)
        if widget == 'null' or prop_hidden:
            return
        elif widget == 'colorramp':
            # Ramps live on a hidden "fake" node group; offer repair operators
            # when that group is missing.
            node_group = bpy.data.node_groups.get(node.rman_fake_node_group, None)
            if not node_group:
                row = layout.row(align=True)
                row.context_pointer_set("node", node)
                row.operator('node.rman_fix_ramp')
                row.operator('node.rman_fix_all_ramps')
                return
            ramp_name = prop
            ramp_node = node_group.nodes[ramp_name]
            # Linked libraries are read-only; disable editing in that case.
            layout.enabled = (nt.library is None)
            layout.template_color_ramp(
                ramp_node, 'color_ramp')
            return
        elif widget == 'floatramp':
            node_group = bpy.data.node_groups.get(node.rman_fake_node_group, None)
            if not node_group:
                row = layout.row(align=True)
                row.context_pointer_set("node", node)
                row.operator('node.rman_fix_ramp')
            ramp_name = prop
            ramp_node = node_group.nodes[ramp_name]
            layout.enabled = (nt.library is None)
            layout.template_curve_mapping(
                ramp_node, 'mapping')
            return
        elif widget == 'displaymetadata':
            draw_dsypmeta_item(layout, node, prop_name)
        # double check the conditionalVisOps
        # this might be our first time drawing, i.e.: scene was just opened.
        conditionalVisOps = prop_meta.get('conditionalVisOps', None)
        if conditionalVisOps:
            cond_expr = conditionalVisOps.get('expr', None)
            if cond_expr:
                try:
                    # NOTE(review): eval() of a config-supplied expression
                    # (references 'node'); trusted-config assumption.
                    hidden = not eval(cond_expr)
                    if prop_meta.get('conditionalLockOps', None):
                        # Lock ops disable the property instead of hiding it.
                        setattr(node, '%s_disabled' % prop_name, hidden)
                        prop_disabled = hidden
                        if hasattr(node, 'inputs') and prop_name in node.inputs:
                            node.inputs[prop_name].hide = hidden
                    else:
                        setattr(node, '%s_hidden' % prop_name, hidden)
                        if hasattr(node, 'inputs') and prop_name in node.inputs:
                            node.inputs[prop_name].hide = hidden
                        if hidden:
                            return
                except:
                    pass
        # else check if the socket with this name is connected
        inputs = getattr(node, 'inputs', dict())
        socket = inputs.get(prop_name, None)
        layout.context_pointer_set("socket", socket)
        if socket and socket.is_linked:
            # Connected input: show the upstream node and, when expanded,
            # recurse into its properties.
            input_node = shadergraph_utils.socket_node_input(nt, socket)
            icon = get_open_close_icon(socket.ui_open)
            split = layout.split()
            row = split.row()
            draw_indented_label(row, None, level)
            row.context_pointer_set("socket", socket)
            row.operator('node.rman_open_close_link', text='', icon=icon, emboss=False)
            label = prop_meta.get('label', prop_name)
            rman_icon = rfb_icons.get_node_icon(input_node.bl_label)
            row.label(text=label + ' (%s):' % input_node.name)
            if sticky:
                # Sticky summary only needs the header line.
                return
            row.context_pointer_set("socket", socket)
            row.context_pointer_set("node", node)
            row.context_pointer_set("nodetree", nt)
            row.menu('NODE_MT_renderman_connection_menu', text='', icon_value=rman_icon.icon_id)
            if socket.ui_open:
                draw_node_properties_recursive(layout, context, nt,
                                               input_node, level=level + 1)
        else:
            row = layout.row(align=True)
            row.enabled = not prop_disabled
            if prop_meta['renderman_type'] == 'page':
                # Page: collapsible group of sub-properties.
                ui_prop = prop_name + "_uio"
                ui_open = getattr(node, ui_prop)
                icon = get_open_close_icon(ui_open)
                split = layout.split(factor=NODE_LAYOUT_SPLIT)
                row = split.row()
                row.enabled = not prop_disabled
                draw_indented_label(row, None, level)
                row.context_pointer_set("node", node)
                op = row.operator('node.rman_open_close_page', text='', icon=icon, emboss=False)
                op.prop_name = ui_prop
                sub_prop_names = list(prop)
                if shadergraph_utils.has_lobe_enable_props(node):
                    # Lift the page's 'enable*' toggle up into the header row.
                    for pn in sub_prop_names:
                        if pn.startswith('enable'):
                            row.prop(node, pn, text='')
                            sub_prop_names.remove(pn)
                            break
                row.label(text=prop_name.split('.')[-1] + ':')
                if ui_open:
                    draw_props(node, sub_prop_names, layout, level=level + 1, nt=nt, context=context)
            elif prop_meta['renderman_type'] == 'array':
                # Array: collapsible header, size field, one row per element
                # (each element may itself be a linked socket).
                ui_prop = prop_name + "_uio"
                ui_open = getattr(node, ui_prop)
                icon = get_open_close_icon(ui_open)
                split = layout.split(factor=NODE_LAYOUT_SPLIT)
                row = split.row()
                row.enabled = not prop_disabled
                draw_indented_label(row, None, level)
                row.context_pointer_set("node", node)
                op = row.operator('node.rman_open_close_page', text='', icon=icon, emboss=False)
                op.prop_name = ui_prop
                sub_prop_names = list(prop)
                arraylen = getattr(node, '%s_arraylen' % prop_name)
                prop_label = prop_meta.get('label', prop_name)
                row.label(text=prop_label + ' [%d]:' % arraylen)
                if ui_open:
                    level += 1
                    row = layout.row(align=True)
                    col = row.column()
                    row = col.row()
                    draw_indented_label(row, None, level)
                    row.prop(node, '%s_arraylen' % prop_name, text='Size')
                    for i in range(0, arraylen):
                        row = layout.row(align=True)
                        col = row.column()
                        row = col.row()
                        array_elem_nm = '%s[%d]' % (prop_name, i)
                        draw_indented_label(row, None, level)
                        if array_elem_nm in node.inputs:
                            op_text = ''
                            socket = node.inputs[array_elem_nm]
                            row.context_pointer_set("socket", socket)
                            row.context_pointer_set("node", node)
                            row.context_pointer_set("nodetree", nt)
                            if socket.is_linked:
                                input_node = shadergraph_utils.socket_node_input(nt, socket)
                                rman_icon = rfb_icons.get_node_icon(input_node.bl_label)
                                row.label(text='%s[%d] (%s):' % (prop_label, i, input_node.name))
                                row.menu('NODE_MT_renderman_connection_menu', text='', icon_value=rman_icon.icon_id)
                                draw_node_properties_recursive(layout, context, nt, input_node, level=level + 1)
                            else:
                                row.prop(node, '%s[%d]' % (prop_name, i), slider=True)
                                rman_icon = rfb_icons.get_icon('rman_connection_menu')
                                row.menu('NODE_MT_renderman_connection_menu', text='', icon_value=rman_icon.icon_id)
                # Arrays never fall through to the fileinput handling below.
                return
            else:
                draw_indented_label(row, None, level)
                if widget == 'propsearch':
                    # use a prop_search layout
                    options = prop_meta['options']
                    prop_search_parent = options.get('prop_parent')
                    prop_search_name = options.get('prop_name')
                    # NOTE(review): eval() because prop_parent is a dotted
                    # path string from the config; trusted-config assumption.
                    eval(f'row.prop_search(node, prop_name, {prop_search_parent}, "{prop_search_name}")')
                elif prop_meta['renderman_type'] in ['struct', 'bxdf', 'vstruct']:
                    # Struct-like inputs have no editable value; label only.
                    row.label(text=prop_meta['label'])
                elif read_only:
                    if not_connectable:
                        row2 = row.row()
                        row2.prop(node, prop_name)
                        row2.enabled=False
                    else:
                        row.label(text=prop_meta['label'])
                        row2 = row.row()
                        row2.prop(node, prop_name, text="", slider=True)
                        row2.enabled=False
                else:
                    row.prop(node, prop_name, slider=True)
                if prop_name in inputs:
                    row.context_pointer_set("socket", socket)
                    row.context_pointer_set("node", node)
                    row.context_pointer_set("nodetree", nt)
                    rman_icon = rfb_icons.get_icon('rman_connection_menu')
                    row.menu('NODE_MT_renderman_connection_menu', text='', icon_value=rman_icon.icon_id)
            if widget in ['fileinput','assetidinput']:
                # Texture inputs: add a color-space selector and a texture
                # manager button when the file exists.
                prop_val = getattr(node, prop_name)
                if prop_val != '':
                    # Local imports — presumably to avoid a circular import
                    # at module load time; confirm.
                    from . import texture_utils
                    from . import scene_utils
                    if not texture_utils.get_txmanager().is_file_src_tex(prop_val):
                        colorspace_prop_name = '%s_colorspace' % prop_name
                        if not hasattr(node, colorspace_prop_name):
                            return
                        row = layout.row(align=True)
                        if texture_utils.get_txmanager().does_file_exist(prop_val):
                            row.enabled = not prop_disabled
                            draw_indented_label(row, None, level)
                            row.prop(node, colorspace_prop_name, text='Color Space')
                            rman_icon = rfb_icons.get_icon('rman_txmanager')
                            # NOTE(review): 'id' shadows the builtin here.
                            id = scene_utils.find_node_owner(node)
                            nodeID = texture_utils.generate_node_id(node, prop_name, ob=id)
                            op = row.operator('rman_txmgr_list.open_txmanager', text='', icon_value=rman_icon.icon_id)
                            op.nodeID = nodeID
                        else:
                            draw_indented_label(row, None, level)
                            # NOTE(review): user-facing typo — should read
                            # "Input image does not exist."
                            row.label(text="Input mage does not exists.", icon='ERROR')
def draw_props(node, prop_names, layout, level=0, nt=None, context=None):
    """Draw each property named in *prop_names* for *node* into *layout*."""
    layout.context_pointer_set("node", node)
    if nt:
        layout.context_pointer_set("nodetree", nt)
    for name in prop_names:
        draw_prop(node, name, layout, level=level, nt=nt, context=context)
def panel_node_draw(layout, context, id_data, output_type, input_name):
    """Draw the node-tree properties panel for *id_data*.

    Looks up the output node of *output_type* in id_data's node tree; if
    found, draws the full node properties UI, otherwise a placeholder label.
    Always returns True so callers can chain on the result.
    """
    ntree = id_data.node_tree
    node = shadergraph_utils.find_node(id_data, output_type)
    if not node:
        layout.label(text="No output node")
    else:
        # Fix: dropped the unused 'input' lookup (its result was never read,
        # and the name shadowed the builtin).
        draw_nodes_properties_ui(layout, context, ntree)
    return True
def draw_nodes_properties_ui(layout, context, nt, input_name='Bxdf',
                             output_node_type="output"):
    """Draw the properties UI for whatever is plugged into *input_name* on the tree's output node."""
    # Locate the RenderMan output node of the requested type.
    output_node = None
    for candidate in nt.nodes:
        if hasattr(candidate, 'renderman_node_type') and candidate.renderman_node_type == output_node_type:
            output_node = candidate
            break
    if output_node is None:
        return
    socket = output_node.inputs[input_name]
    node = shadergraph_utils.socket_node_input(nt, socket)
    layout.context_pointer_set("nodetree", nt)
    layout.context_pointer_set("node", output_node)
    layout.context_pointer_set("socket", socket)
    # Lights and light filters skip the connection-menu header row.
    if input_name not in ['Light', 'LightFilter']:
        split = layout.split(factor=0.35)
        split.label(text=socket.name + ':')
        split.context_pointer_set("socket", socket)
        split.context_pointer_set("node", output_node)
        split.context_pointer_set("nodetree", nt)
        if socket.is_linked:
            rman_icon = rfb_icons.get_node_icon(node.bl_label)
            split.menu('NODE_MT_renderman_connection_menu',
                       text='%s (%s)' % (node.name, node.bl_label),
                       icon_value=rman_icon.icon_id)
        else:
            split.menu('NODE_MT_renderman_connection_menu', text='None',
                       icon='NODE_MATERIAL')
    if node is not None:
        draw_node_properties_recursive(layout, context, nt, node)
def show_node_sticky_params(layout, node, prop_names, context, nt, output_node, node_label_drawn=False):
    """Draw only the parameters of *node* that are flagged "sticky".

    A parameter is sticky when the node has a truthy '<name>_sticky'
    attribute. Page parameters recurse into their sub-parameters. The
    node's header label is drawn once, before the first sticky parameter.

    Returns:
        bool: True once the node's header label has been drawn (threaded
        through recursive calls via *node_label_drawn*).
    """
    label_drawn = node_label_drawn
    for prop_name in prop_names:
        prop_meta = node.prop_meta[prop_name]
        renderman_type = prop_meta.get('renderman_type', '')
        if renderman_type == 'page':
            # Pages are containers; recurse into their sub-parameters.
            prop = getattr(node, prop_name)
            sub_prop_names = list(prop)
            label_drawn = show_node_sticky_params(layout, node, sub_prop_names, context, nt, output_node, label_drawn)
        else:
            sticky_prop = '%s_sticky' % prop_name
            if not getattr(node, sticky_prop, False):
                continue

            # Empty spacer row, then (once) the node's header label.
            row = layout.row(align=True)
            if not label_drawn:
                row = layout.row(align=True)
                rman_icon = rfb_icons.get_node_icon(node.bl_label)
                row.label(text='%s (%s)' % (node.name, node.bl_label), icon_value=rman_icon.icon_id)
                label_drawn = True

            row = layout.row(align=True)
            # NOTE(review): removed a dead socket lookup here
            # (node.inputs.get(prop_name) was computed but never used).
            draw_sticky_toggle(row, node, prop_name, output_node)
            draw_prop(node, prop_name, row, level=1, nt=nt, context=context, sticky=True)

    return label_drawn
def show_node_match_params(layout, node, expr, match_on, prop_names, context, nt, node_label_drawn=False):
    """Draw only the parameters of *node* matching a search expression.

    Args:
        layout (bpy.types.UILayout): layout to draw into.
        node: RenderMan shading node whose parameters are searched.
        expr (str): regular expression to match with.
        match_on (str): what to match against -- one of NODE_NAME,
            NODE_TYPE, NODE_LABEL, PARAM_LABEL or PARAM_NAME.
        prop_names (list): parameter names to consider (pages recurse).
        node_label_drawn (bool): whether the node's header label was
            already drawn by an earlier call.

    Returns:
        bool: True once the node's header label has been drawn.
    """
    pattern = re.compile(expr)
    if match_on in ['NODE_NAME', 'NODE_TYPE', 'NODE_LABEL']:
        # Node-level match: a non-match suppresses the whole node.
        haystack = node.name
        if match_on == 'NODE_TYPE':
            haystack = node.bl_label
        elif match_on == 'NODE_LABEL':
            haystack = node.label
        if not re.match(pattern, haystack):
            return node_label_drawn

    label_drawn = node_label_drawn
    for prop_name in prop_names:
        prop_meta = node.prop_meta[prop_name]
        prop_label = prop_meta.get('label', prop_name)
        renderman_type = prop_meta.get('renderman_type', '')
        if renderman_type == 'page':
            # Pages are containers; recurse into their sub-parameters.
            prop = getattr(node, prop_name)
            sub_prop_names = list(prop)
            label_drawn = show_node_match_params(layout, node, expr, match_on, sub_prop_names, context, nt, label_drawn)
        else:
            if match_on in ['PARAM_LABEL', 'PARAM_NAME']:
                haystack = prop_name
                if match_on == 'PARAM_LABEL':
                    haystack = prop_label
                if not re.match(pattern, haystack):
                    continue

            # Empty spacer row, then (once) the node's header label.
            row = layout.row(align=True)
            if not label_drawn:
                row = layout.row(align=True)
                rman_icon = rfb_icons.get_node_icon(node.bl_label)
                row.label(text='%s (%s)' % (node.name, node.bl_label), icon_value=rman_icon.icon_id)
                label_drawn = True

            row = layout.row(align=True)
            # NOTE(review): removed a dead socket lookup here
            # (node.inputs.get(prop_name) was computed but never used).
            draw_prop(node, prop_name, row, level=1, nt=nt, context=context, sticky=True)

    return label_drawn
def draw_node_properties_recursive(layout, context, nt, node, level=0):
    """Draw *node*'s parameters, recursing into connected upstream nodes.

    RenderMan plugin nodes draw via draw_props(); anything else (Cycles
    nodes, PxrOSLPatternNode) falls back to the node's own draw_buttons()
    plus a per-socket listing. *level* controls label indentation depth.
    """
    # if this is a cycles node do something different
    if not hasattr(node, 'plugin_name') or node.bl_idname == 'PxrOSLPatternNode':
        node.draw_buttons(context, layout)
        for input in node.inputs:
            if input.is_linked:
                # Linked socket: expandable header row with a connection
                # menu; recurse into the upstream node when expanded.
                input_node = shadergraph_utils.socket_node_input(nt, input)
                icon = get_open_close_icon(input.show_expanded)

                split = layout.split(factor=NODE_LAYOUT_SPLIT)
                row = split.row()
                draw_indented_label(row, None, level)

                label = input.name
                rman_icon = rfb_icons.get_node_icon(input_node.bl_label)
                row.prop(input, "show_expanded", icon=icon, text='',
                         icon_only=True, emboss=False)
                row.label(text=label + ' (%s):' % input_node.name)
                row.context_pointer_set("socket", input)
                row.context_pointer_set("node", node)
                row.context_pointer_set("nodetree", nt)
                row.menu('NODE_MT_renderman_connection_menu', text='', icon_value=rman_icon.icon_id)

                if input.show_expanded:
                    draw_node_properties_recursive(layout, context, nt,
                                                   input_node, level=level + 1)
            else:
                row = layout.row(align=True)
                draw_indented_label(row, None, level)
                # indented_label(row, socket.name+':')
                # don't draw prop for struct type
                if input.hide_value:
                    row.label(text=input.name)
                else:
                    row.prop(input, 'default_value',
                             slider=True, text=input.name)
                row.context_pointer_set("socket", input)
                row.context_pointer_set("node", node)
                row.context_pointer_set("nodetree", nt)
                row.menu('NODE_MT_renderman_connection_menu', text='', icon='NODE_MATERIAL')
    else:
        draw_props(node, node.prop_names, layout, level, nt=nt, context=context)
    layout.separator()
|
prman-pixar/RenderManForBlender | rman_properties/rman_properties_misc/__init__.py | from bpy.props import PointerProperty, StringProperty, BoolProperty, \
EnumProperty, IntProperty, FloatProperty, FloatVectorProperty, \
CollectionProperty, BoolVectorProperty
from ...rfb_utils import shadergraph_utils
from ...rfb_logger import rfb_log
from ... import rman_config
import bpy
class RendermanBlColorRamp(bpy.types.PropertyGroup):
    """One knot of a color ramp: an RGBA value at a parametric position."""
    rman_value: FloatVectorProperty(name="value",
                                    default=(1.0, 1.0, 1.0, 1.0), size=4,
                                    subtype="COLOR")
    position: FloatProperty(name="position", default=0.0)
class RendermanBlFloatRamp(bpy.types.PropertyGroup):
    """One knot of a float ramp: a scalar value at a parametric position."""
    rman_value: FloatProperty(name="value", default=0.0)
    position: FloatProperty(name="position", default=0.0)
class RendermanUserTokenGroup(bpy.types.PropertyGroup):
    """A user-defined name/value string token pair."""
    name: StringProperty(name="Name", default="")
    value: StringProperty(name="Value", default="")
class RendermanLightPointer(bpy.types.PropertyGroup):
    """Pointer to a single RenderMan light (or light filter) object."""

    def validate_light_obj(self, ob):
        # Poll callback: only allow objects that are RenderMan lights
        # (light filters included).
        if shadergraph_utils.is_rman_light(ob, include_light_filters=True):
            return True
        return False

    name: StringProperty(name="name")
    light_ob: PointerProperty(type=bpy.types.Object, poll=validate_light_obj)
class RendermanLightGroup(bpy.types.PropertyGroup):
    """A named group of lights."""

    def update_name(self, context):
        # Renaming the group tags every member light so the renderer
        # picks up the new group membership.
        for member in self.members:
            member.light_ob.update_tag(refresh={'DATA'})

    def update_members_index(self, context):
        # Selecting a row in the UI list also selects/activates the
        # corresponding light object in the view layer.
        if self.members_index < 0:
            return
        member = self.members[self.members_index]
        light_ob = member.light_ob
        if context.view_layer.objects.active:
            context.view_layer.objects.active.select_set(False)
        light_ob.select_set(True)
        context.view_layer.objects.active = light_ob

    name: StringProperty(name="Group Name", update=update_name)
    members: CollectionProperty(type=RendermanLightPointer,
                                name='Group Members')
    members_index: IntProperty(min=-1, default=-1,
                               update=update_members_index)
class RendermanObjectPointer(bpy.types.PropertyGroup):
    """Pointer to a scene object, used as a member of object groups and
    light-linking collections."""

    def update_name(self, context):
        if self.ob_pointer:
            self.ob_pointer.update_tag(refresh={'OBJECT'})

    name: StringProperty(name="name", update=update_name)

    def update_ob_pointer(self, context):
        self.ob_pointer.update_tag(refresh={'OBJECT'})

    ob_pointer: PointerProperty(type=bpy.types.Object, update=update_ob_pointer)

    def update_link(self, context):
        """Maintain light-linking subsets when the illuminate enum changes.

        For regular lights an 'OFF' link adds this object's light to the
        object's rman_lighting_excludesubset; for light filters a non-'OFF'
        link adds it to rman_lightfilter_subset. Tags both the light and
        the object so the renderer refreshes.
        """
        light_ob = getattr(context, 'light_ob', None)
        if not light_ob:
            # Fall back to the active object; bail out unless it is a
            # RenderMan light or light filter.
            light_ob = context.active_object
            if light_ob.type != 'LIGHT':
                return
            light_props = shadergraph_utils.get_rman_light_properties_group(light_ob)
            if light_props.renderman_light_role not in {'RMAN_LIGHTFILTER', 'RMAN_LIGHT'}:
                return

        light_ob.update_tag(refresh={'DATA'})
        ob = self.ob_pointer
        light_props = shadergraph_utils.get_rman_light_properties_group(light_ob)
        if light_props.renderman_light_role == 'RMAN_LIGHT':
            if self.illuminate == 'OFF':
                # Excluded: record the light in the object's exclude subset.
                subset = ob.renderman.rman_lighting_excludesubset.add()
                subset.name = light_ob.name
                subset.light_ob = light_ob
            else:
                for j, subset in enumerate(ob.renderman.rman_lighting_excludesubset):
                    if subset.light_ob == light_ob:
                        ob.renderman.rman_lighting_excludesubset.remove(j)
                        break
        else:
            # Light filter: membership in rman_lightfilter_subset is the
            # inverse -- present when linked ('ON'/'DEFAULT').
            if self.illuminate == 'OFF':
                for j, subset in enumerate(ob.renderman.rman_lightfilter_subset):
                    if subset.light_ob == light_ob:
                        ob.renderman.rman_lightfilter_subset.remove(j)
                        break
            else:
                subset = ob.renderman.rman_lightfilter_subset.add()
                subset.name = light_ob.name
                subset.light_ob = light_ob
        ob.update_tag(refresh={'OBJECT'})

    illuminate: EnumProperty(
        name="Illuminate",
        update=update_link,
        items=[
              ('DEFAULT', 'Default', ''),
               ('ON', 'On', ''),
               ('OFF', 'Off', '')])
class RendermanVolumeAggregate(bpy.types.PropertyGroup):
    """A named volume aggregate: a group of objects rendered as one volume."""

    def update_name(self, context):
        # Tag every member so the renderer sees the new aggregate name.
        for member in self.members:
            member.ob_pointer.update_tag(refresh={'OBJECT'})

    def update_members_index(self, context):
        # Selecting a member row selects/activates it in the viewport.
        if self.members_index < 0:
            return
        member = self.members[self.members_index]
        ob = member.ob_pointer
        if context.view_layer.objects.active:
            context.view_layer.objects.active.select_set(False)
        ob.select_set(True)
        context.view_layer.objects.active = ob

    name: StringProperty(name="Volume Aggregate Name", update=update_name)
    members: CollectionProperty(type=RendermanObjectPointer,
                                name='Aggregate Members')
    members_index: IntProperty(min=-1, default=-1, update=update_members_index)
class RendermanGroup(bpy.types.PropertyGroup):
    """A named group of scene objects."""

    def update_name(self, context):
        # Tag every member so the renderer sees the new group name.
        for member in self.members:
            member.ob_pointer.update_tag(refresh={'OBJECT'})

    def update_members_index(self, context):
        # Selecting a member row selects/activates it in the viewport.
        if self.members_index < 0:
            return
        member = self.members[self.members_index]
        ob = member.ob_pointer
        if context.view_layer.objects.active:
            context.view_layer.objects.active.select_set(False)
        ob.select_set(True)
        context.view_layer.objects.active = ob

    name: StringProperty(name="Group Name", update=update_name)
    members: CollectionProperty(type=RendermanObjectPointer,
                                name='Group Members')
    members_index: IntProperty(min=-1, default=-1, update=update_members_index)
class LightLinking(bpy.types.PropertyGroup):
    """Light-linking entry: one light plus the objects linked to it."""

    def validate_light_obj(self, ob):
        # Poll callback: restrict the pointer to RenderMan lights
        # (light filters included).
        if shadergraph_utils.is_rman_light(ob, include_light_filters=True):
            return True
        return False

    light_ob: PointerProperty(type=bpy.types.Object, poll=validate_light_obj)

    members: CollectionProperty(type=RendermanObjectPointer,
                                name='Group Members')

    def update_members_index(self, context):
        # Selecting a member row selects/activates it in the viewport.
        if self.members_index < 0:
            return
        member = self.members[self.members_index]
        ob = member.ob_pointer
        if context.view_layer.objects.active:
            context.view_layer.objects.active.select_set(False)
        ob.select_set(True)
        context.view_layer.objects.active = ob

    members_index: IntProperty(min=-1, default=-1, update=update_members_index)
class RendermanMeshPrimVar(bpy.types.PropertyGroup):
    """Maps a piece of Blender mesh data to an exported primitive variable."""
    name: StringProperty(
        name="Variable Name",
        description="Name of the exported renderman primitive variable")
    data_name: StringProperty(
        name="Data Name",
        description="Name of the Blender data to export as the primitive variable")
    data_source: EnumProperty(
        name="Data Source",
        description="Blender data type to export as the primitive variable",
        items=[('VERTEX_GROUP', 'Vertex Group', ''),
               ('VERTEX_COLOR', 'Vertex Color', ''),
               ('VERTEX_ATTR_COLOR', 'Vertex Attr Color', ''),
               ('UV_TEXTURE', 'UV Texture', '')
               ]
    )
class RendermanReferencePosePrimVars(bpy.types.PropertyGroup):
    """Per-vertex reference pose primvars (Pref/WPref/Nref/WNref)."""
    # Flags recording which reference-pose primvars have been captured.
    has_Pref: BoolProperty(name='has_Pref', default=False)
    has_WPref: BoolProperty(name='has_WPref', default=False)
    has_Nref: BoolProperty(name='has_Nref', default=False)
    has_WNref: BoolProperty(name='has_WNref', default=False)
    # Captured positions and normals; P/N presumably object space, WP/WN
    # world space -- TODO confirm against the exporter.
    rman__Pref: FloatVectorProperty(name='rman__Pref',
                                    default=(0,0, 0), size=3,
                                    subtype="XYZ")
    rman__WPref: FloatVectorProperty(name='rman__WPref',
                                     default=(0,0, 0), size=3,
                                     subtype="XYZ")
    rman__Nref: FloatVectorProperty(name='rman__Nref',
                                    default=(0,0, 0), size=3,
                                    subtype="XYZ")
    rman__WNref: FloatVectorProperty(name='rman__WNref',
                                     default=(0,0, 0), size=3,
                                     subtype="XYZ")
class RendermanAnimSequenceSettings(bpy.types.PropertyGroup):
    """Settings for treating an archive file path as an animated sequence."""
    animated_sequence: BoolProperty(
        name="Animated Sequence",
        description="Interpret this archive as an animated sequence (converts #### in file path to frame number)",
        default=False)
    sequence_in: IntProperty(
        name="Sequence In Point",
        description="The first numbered file to use",
        default=1)
    sequence_out: IntProperty(
        name="Sequence Out Point",
        description="The last numbered file to use",
        default=24)
    blender_start: IntProperty(
        name="Blender Start Frame",
        description="The frame in Blender to begin playing back the sequence",
        default=1)
class Tab_CollectionGroup(bpy.types.PropertyGroup):
    """Legacy UI tab toggle flags.

    NOTE(review): the bpy.types.Scene assignments below run as side
    effects at class-definition (module import) time rather than at
    registration -- a legacy pattern kept as-is for compatibility.
    """

    #################
    #       Tab     #
    #################
    bpy.types.Scene.rm_ipr = BoolProperty(
        name="IPR settings",
        description="Show some useful setting for the Interactive Rendering",
        default=False)

    bpy.types.Scene.rm_render = BoolProperty(
        name="Render settings",
        description="Show some useful setting for the Rendering",
        default=False)

    bpy.types.Scene.rm_render_external = BoolProperty(
        name="Render settings",
        description="Show some useful setting for external rendering",
        default=False)

    bpy.types.Scene.rm_help = BoolProperty(
        name="Help",
        description="Show some links about RenderMan and the documentation",
        default=False)

    bpy.types.Scene.rm_env = BoolProperty(
        name="Envlight",
        description="Show some settings about the selected Env light",
        default=False)

    bpy.types.Scene.rm_area = BoolProperty(
        name="AreaLight",
        description="Show some settings about the selected Area Light",
        default=False)

    bpy.types.Scene.rm_daylight = BoolProperty(
        name="DayLight",
        description="Show some settings about the selected Day Light",
        default=False)

    bpy.types.Scene.prm_cam = BoolProperty(
        name="Renderman Camera",
        description="Show some settings about the camera",
        default=False)
# Classes registered by this module, in dependency order: pointer and
# collection targets (e.g. RendermanLightPointer) must be registered
# before the groups that reference them.
classes = [
    RendermanBlColorRamp,
    RendermanBlFloatRamp,
    RendermanUserTokenGroup,
    RendermanLightPointer,
    RendermanLightGroup,
    RendermanObjectPointer,
    RendermanGroup,
    RendermanVolumeAggregate,
    LightLinking,
    RendermanMeshPrimVar,
    RendermanReferencePosePrimVars,
    RendermanAnimSequenceSettings,
    Tab_CollectionGroup
]
def register():
    """Register every property group declared in this module with Blender."""
    register_class = bpy.utils.register_class
    for property_group in classes:
        register_class(property_group)
def unregister():
    """Unregister this module's classes, tolerating already-unregistered ones."""
    for property_group in classes:
        try:
            bpy.utils.unregister_class(property_group)
        except RuntimeError:
            # Blender raises RuntimeError when the class was never
            # registered; log it and keep going so the remaining
            # classes still get unregistered.
            rfb_log().debug('Could not unregister class: %s' % str(property_group))
prman-pixar/RenderManForBlender | rman_operators/rman_operators_editors/rman_operators_editors_lightmixer.py | <filename>rman_operators/rman_operators_editors/rman_operators_editors_lightmixer.py
from bpy.props import (StringProperty, BoolProperty, EnumProperty, IntProperty)
from ...rman_ui.rman_ui_base import CollectionPanel
from ...rfb_utils import scene_utils
from ...rfb_utils import shadergraph_utils
from ...rfb_logger import rfb_log
from ... import rfb_icons
from ...rman_operators.rman_operators_collections import return_empty_list
from ...rman_config import __RFB_CONFIG_DICT__ as rfb_config
import bpy
import re
class RENDERMAN_UL_LightMixer_Group_Members_List(bpy.types.UIList):
    """UIList row for one light in a light mixer group.

    Each row shows a remove button, the light's icon and name, its color
    or temperature controls, intensity/exposure sliders, and solo/mute
    toggles.
    """

    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        rm = context.scene.renderman
        light = item.light_ob
        layout.context_pointer_set("selected_light", light)
        op = layout.operator('renderman.remove_light_from_light_mixer_group', text='', icon='REMOVE')

        light_shader = shadergraph_utils.get_light_node(light)
        if not light_shader:
            # Light object without a RenderMan light shader: warning row.
            layout.label(text=light.name)
            layout.label(text='NO LIGHT SHADER')
            return

        icon = rfb_icons.get_light_icon(light_shader.bl_label)
        op.group_index = rm.light_mixer_groups_index
        layout.label(text=light.name, icon_value=icon.icon_id)

        # NOTE(review): removed an unused get_rman_light_properties_group()
        # lookup here; its result was never read.
        if light_shader.bl_label == 'PxrEnvDayLight':
            layout.prop(light_shader, 'skyTint', text='')
        else:
            # Either a temperature slider or an RGB color, depending on
            # the shader's enableTemperature toggle.
            layout.prop(light_shader, 'enableTemperature', text='Temp')
            if light_shader.enableTemperature:
                layout.prop(light_shader, 'temperature', text='', slider=True)
            else:
                layout.prop(light_shader, 'lightColor', text='')
        layout.prop(light_shader, 'intensity', slider=True)
        layout.prop(light_shader, 'exposure', slider=True)

        # Solo/mute toggles, with icons reflecting the current state.
        solo_icon = 'LIGHT'
        if light.renderman.solo:
            solo_icon = 'OUTLINER_OB_LIGHT'
        layout.prop(light.renderman, 'solo', text='', icon=solo_icon, icon_only=True, emboss=False )
        mute_icon = 'HIDE_OFF'
        if light.renderman.mute:
            mute_icon = 'HIDE_ON'
        layout.prop(light.renderman, 'mute', text='', icon=mute_icon, icon_only=True, emboss=False)
class PRMAN_OT_Renderman_Open_Light_Mixer_Editor(CollectionPanel, bpy.types.Operator):
    """Modal dialog for editing light mixer groups.

    Shows the collection of mixer groups, lets the user add lights
    (from a dropdown or via a regex search filter) and tweak each member
    light inline via RENDERMAN_UL_LightMixer_Group_Members_List rows.
    """

    bl_idname = "scene.rman_open_light_mixer_editor"
    bl_label = "RenderMan Light Mixer Editor"

    def updated_light_selected_name(self, context):
        # When a light is picked from the dropdown, make it the selected,
        # active object in the view layer.
        light_ob = context.scene.objects.get(self.selected_light_name, None)
        if not light_ob:
            return

        if context.view_layer.objects.active:
            context.view_layer.objects.active.select_set(False)
        light_ob.select_set(True)
        context.view_layer.objects.active = light_ob

    def light_list_items(self, context):
        # EnumProperty items callback: every scene light not already in
        # the selected mixer group, optionally filtered by regex.
        pattern = re.compile(self.light_search_filter)
        scene = context.scene
        rm = scene.renderman

        if self.do_light_filter and self.light_search_filter == '':
            return return_empty_list(label='No Lights Found')

        group_index = rm.light_mixer_groups_index
        lights_in_group = []
        object_groups = rm.light_mixer_groups
        object_group = object_groups[group_index]
        lights_in_group = [member.light_ob.name for member in object_group.members]

        items = []
        for light in scene_utils.get_all_lights(context.scene, include_light_filters=False):
            if light.name not in lights_in_group:
                if self.do_light_filter and not re.match(pattern, light.name):
                    continue
                items.append((light.name, light.name, ''))
        if not items:
            return return_empty_list(label='No Lights Found')
        elif self.do_light_filter:
            items.insert(0, ('0', 'Results (%d)' % len(items), '', '', 0))
        else:
            items.insert(0, ('0', 'Select Light', '', '', 0))
        return items

    def update_do_light_filter(self, context):
        # Reset the dropdown whenever filtering is toggled.
        self.selected_light_name = '0'

    selected_light_name: EnumProperty(name="Light", items=light_list_items, update=updated_light_selected_name)
    light_search_filter: StringProperty(name="Light Filter Search", default="")
    do_light_filter: BoolProperty(name="Filter",
                                  description="Search and add multiple lights",
                                  default=False,
                                  update=update_do_light_filter)

    def execute(self, context):
        return{'FINISHED'}

    def cancel(self, context):
        # A left-click outside the dialog would normally dismiss it;
        # re-invoke so the editor stays open in that case.
        if self.event and self.event.type == 'LEFTMOUSE':
            bpy.ops.scene.rman_open_light_mixer_editor('INVOKE_DEFAULT')

    def __init__(self):
        self.event = None

    def invoke(self, context, event):
        wm = context.window_manager
        width = rfb_config['editor_preferences']['lightmixer_editor']['width']
        self.event = event
        return wm.invoke_props_dialog(self, width=width)

    def draw(self, context):
        layout = self.layout
        scene = context.scene
        rm = scene.renderman
        self._draw_collection(context, layout, rm, "Light Mixer Groups",
                              "collection.add_remove",
                              "scene.renderman",
                              "light_mixer_groups", "light_mixer_groups_index",
                              default_name='mixerGroup_%d' % len(rm.light_mixer_groups))

    def draw_item(self, layout, context, item):
        scene = context.scene
        rm = scene.renderman
        light_group = rm.light_mixer_groups[rm.light_mixer_groups_index]
        # NOTE(review): removed an unused list of member light objects
        # that was built here but never read.

        row = layout.row(align=True)
        row.separator()

        box = layout.box()
        row = box.row()
        split = row.split(factor=0.25)
        row = split.row()
        row.prop(self, 'do_light_filter', text='', icon='FILTER', icon_only=True)
        if not self.do_light_filter:
            # Dropdown mode: pick a single light and add it.
            row.prop(self, 'selected_light_name', text='')
            col = row.column()
            if self.selected_light_name == '0' or self.selected_light_name == '':
                col.enabled = False
                op = col.operator("renderman.add_light_to_light_mixer_group", text='', icon='ADD')
                op.open_editor = False
            else:
                col.context_pointer_set('op_ptr', self)
                col.context_pointer_set('selected_light', bpy.data.objects[self.selected_light_name])
                op = col.operator("renderman.add_light_to_light_mixer_group", text='', icon='ADD')
                op.do_scene_selected = False
                op.open_editor = False
        else:
            # Filter mode: a regex search field plus the same add-light row.
            row.prop(self, 'light_search_filter', text='', icon='VIEWZOOM')
            row = box.row()
            split = row.split(factor=0.25)
            row = split.row()
            row.prop(self, 'selected_light_name', text='')
            col = row.column()
            if self.selected_light_name == '0' or self.selected_light_name == '':
                col.enabled = False
                op = col.operator("renderman.add_light_to_light_mixer_group", text='', icon='ADD')
                op.open_editor = False
            else:
                col.context_pointer_set('op_ptr', self)
                col.context_pointer_set('selected_light', bpy.data.objects[self.selected_light_name])
                op = col.operator("renderman.add_light_to_light_mixer_group", text='', icon='ADD')
                op.do_scene_selected = False
                op.open_editor = False

        row = layout.row()
        split = row.split(factor=0.25)
        op = split.operator('renderman.convert_mixer_group_to_light_group', text='Convert to Light Group')
        op.group_index = rm.light_mixer_groups_index
        layout.template_list("RENDERMAN_UL_LightMixer_Group_Members_List", "Renderman_light_mixer_list",
                             light_group, "members", light_group, 'members_index', rows=6)
# Classes registered by this module.
classes = [
    PRMAN_OT_Renderman_Open_Light_Mixer_Editor,
    RENDERMAN_UL_LightMixer_Group_Members_List
]
def register():
    """Register the light mixer editor operator and UIList with Blender."""
    register_class = bpy.utils.register_class
    for editor_class in classes:
        register_class(editor_class)
def unregister():
    """Unregister this module's classes, tolerating already-unregistered ones."""
    for editor_class in classes:
        try:
            bpy.utils.unregister_class(editor_class)
        except RuntimeError:
            # Blender raises RuntimeError when the class was never
            # registered; log it and continue with the rest.
            rfb_log().debug('Could not unregister class: %s' % str(editor_class))
prman-pixar/RenderManForBlender | rman_scene.py | # Translators
from .rman_translators.rman_camera_translator import RmanCameraTranslator
from .rman_translators.rman_light_translator import RmanLightTranslator
from .rman_translators.rman_lightfilter_translator import RmanLightFilterTranslator
from .rman_translators.rman_mesh_translator import RmanMeshTranslator
from .rman_translators.rman_material_translator import RmanMaterialTranslator
from .rman_translators.rman_hair_translator import RmanHairTranslator
from .rman_translators.rman_group_translator import RmanGroupTranslator
from .rman_translators.rman_points_translator import RmanPointsTranslator
from .rman_translators.rman_quadric_translator import RmanQuadricTranslator
from .rman_translators.rman_blobby_translator import RmanBlobbyTranslator
from .rman_translators.rman_particles_translator import RmanParticlesTranslator
from .rman_translators.rman_procedural_translator import RmanProceduralTranslator
from .rman_translators.rman_dra_translator import RmanDraTranslator
from .rman_translators.rman_runprogram_translator import RmanRunProgramTranslator
from .rman_translators.rman_openvdb_translator import RmanOpenVDBTranslator
from .rman_translators.rman_gpencil_translator import RmanGPencilTranslator
from .rman_translators.rman_fluid_translator import RmanFluidTranslator
from .rman_translators.rman_curve_translator import RmanCurveTranslator
from .rman_translators.rman_nurbs_translator import RmanNurbsTranslator
from .rman_translators.rman_volume_translator import RmanVolumeTranslator
from .rman_translators.rman_brickmap_translator import RmanBrickmapTranslator
from .rman_translators.rman_emitter_translator import RmanEmitterTranslator
from .rman_translators.rman_empty_translator import RmanEmptyTranslator
from .rman_translators.rman_alembic_translator import RmanAlembicTranslator
# utils
from .rfb_utils import object_utils
from .rfb_utils import transform_utils
from .rfb_utils import property_utils
from .rfb_utils import display_utils
from .rfb_utils import string_utils
from .rfb_utils import texture_utils
from .rfb_utils import filepath_utils
from .rfb_utils.envconfig_utils import envconfig
from .rfb_utils import scene_utils
from .rfb_utils.prefs_utils import get_pref
from .rfb_utils import shadergraph_utils
from .rfb_utils import color_manager_blender
from .rfb_utils import scenegraph_utils
# config
from .rman_config import __RFB_CONFIG_DICT__ as rfb_config
from . import rman_constants
from .rfb_logger import rfb_log
from .rman_sg_nodes.rman_sg_node import RmanSgNode
import bpy
import os
import sys
class RmanScene(object):
'''
The RmanScene handles translating the Blender scene.
Attributes:
rman_render (RmanRender) - pointer back to the current RmanRender object
rman () - rman python module
sg_scene (RixSGSCene) - the RenderMan scene graph object
context (bpy.types.Context) - the current Blender context object
depsgraph (bpy.types.Depsgraph) - the Blender dependency graph
bl_scene (bpy.types.Scene) - the current Blender scene object
bl_frame_current (int) - the current Blender frame
bl_view_layer (bpy.types.ViewLayer) - the current Blender view layer
rm_rl (RendermanRenderLayerSettings) - the current rman layer
do_motion_blur (bool) - user requested for motion blur
rman_bake (bool) - user requested a bake render
is_interactive (bool) - whether we are in interactive mode
external_render (bool) - whether we are exporting for external (RIB) renders
is_viewport_render (bool) - whether we are rendering into Blender's viewport
scene_solo_light (bool) - user has solo'd a light (all other lights are muted)
rman_materials (dict) - dictionary of scene's materials
rman_objects (dict) - dictionary of all objects
rman_translators (dict) - dictionary of all RmanTranslator(s)
rman_particles (dict) - dictionary of all particle systems used
rman_cameras (dict) - dictionary of all cameras in the scene
obj_hash (dict) - dictionary of hashes to objects ( for object picking )
moving_objects (dict) - dictionary of objects that are moving/deforming in the scene
processed_obs (dict) - dictionary of objects already processed
motion_steps (set) - the full set of motion steps for the scene, including
overrides from individual objects
main_camera (RmanSgCamera) - pointer to the main scene camera
rman_root_sg_node (RixSGGroup) - the main root RixSceneGraph node
render_default_light (bool) - whether to add a "headlight" light when there are no lights in the scene
world_df_node (RixSGShader) - a display filter shader that represents the world color
default_light (RixSGAnalyticLight) - the default "headlight" light
viewport_render_res_mult (float) - the current render resolution multiplier (for IPR)
num_object_instances (int) - the current number of object instances. This is used during IPR to
track the number of instances between edits. We try to use this to determine
when an object is added or deleted.
num_objects_in_viewlayer (int) - the current number of objects in the current view layer. We're using this
to keep track if an object was removed from a collection
objects_in_viewlayer (list) - the list of objects (bpy.types.Object) in this view layer.
'''
def __init__(self, rman_render=None):
    """Initialize an empty RmanScene bound to the given RmanRender."""
    self.rman_render = rman_render
    self.rman = rman_render.rman
    self.sg_scene = None
    self.context = None
    self.depsgraph = None
    self.bl_scene = None
    self.bl_frame_current = None
    self.bl_view_layer = None
    self.rm_rl = None

    # Render-mode flags; set by the export_for_* entry points.
    self.do_motion_blur = False
    self.rman_bake = False
    self.is_interactive = False
    self.external_render = False
    self.is_viewport_render = False
    self.is_swatch_render = False
    self.scene_solo_light = False
    self.scene_any_lights = False
    self.is_xpu = False

    # Per-export caches (cleared again by reset()).
    self.rman_materials = dict()
    self.rman_objects = dict()
    self.rman_translators = dict()
    self.rman_particles = dict()
    self.rman_cameras = dict()
    self.obj_hash = dict()
    self.moving_objects = dict()
    self.processed_obs = []  # NOTE(review): a list, though the class docstring calls it a dict
    self.motion_steps = set()
    self.main_camera = None
    self.rman_root_sg_node = None
    self.render_default_light = False
    self.world_df_node = None
    self.default_light = None
    self.viewport_render_res_mult = 1.0

    # Counters used during IPR to detect added/removed objects.
    self.num_object_instances = 0
    self.num_objects_in_viewlayer = 0
    self.objects_in_viewlayer = list()
    self.bl_local_view = False

    self.create_translators()
def create_translators(self):
    """Populate self.rman_translators, keyed by primitive type.

    The keys correspond to the type strings produced by the
    "_detect_primitive_" function in rfb_utils/object_utils.py.
    """
    translator_classes = {
        'CAMERA': RmanCameraTranslator,
        'LIGHT': RmanLightTranslator,
        'LIGHTFILTER': RmanLightFilterTranslator,
        'MATERIAL': RmanMaterialTranslator,
        'HAIR': RmanHairTranslator,
        'GROUP': RmanGroupTranslator,
        'EMPTY': RmanEmptyTranslator,
        'POINTS': RmanPointsTranslator,
        'META': RmanBlobbyTranslator,
        'PARTICLES': RmanParticlesTranslator,
        'EMITTER': RmanEmitterTranslator,
        'DYNAMIC_LOAD_DSO': RmanProceduralTranslator,
        'DELAYED_LOAD_ARCHIVE': RmanDraTranslator,
        'PROCEDURAL_RUN_PROGRAM': RmanRunProgramTranslator,
        'OPENVDB': RmanOpenVDBTranslator,
        'GPENCIL': RmanGPencilTranslator,
        'MESH': RmanMeshTranslator,
        'QUADRIC': RmanQuadricTranslator,
        'FLUID': RmanFluidTranslator,
        'CURVE': RmanCurveTranslator,
        'NURBS': RmanNurbsTranslator,
        'RI_VOLUME': RmanVolumeTranslator,
        'BRICKMAP': RmanBrickmapTranslator,
        'ALEMBIC': RmanAlembicTranslator,
    }
    # Each translator holds a back-pointer to this scene.
    for prim_type, translator_class in translator_classes.items():
        self.rman_translators[prim_type] = translator_class(rman_scene=self)
def _find_renderman_layer(self):
    """Point self.rm_rl at the view layer's RenderMan settings, or None
    when RenderMan is not enabled for that layer."""
    layer_settings = self.bl_view_layer.renderman
    self.rm_rl = layer_settings if layer_settings.use_renderman else None
def reset(self):
    """Clear all per-export caches and restore per-render defaults."""
    # Flush every lookup cache built during the previous export.
    for cache in (self.rman_materials, self.rman_objects,
                  self.rman_particles, self.rman_cameras,
                  self.obj_hash, self.moving_objects):
        cache.clear()
    self.motion_steps = set()
    self.processed_obs.clear()
    self.render_default_light = False
    self.world_df_node = None
    self.default_light = None

    # Viewport renders honor the user's resolution multiplier;
    # everything else renders at full resolution.
    if self.is_viewport_render:
        self.viewport_render_res_mult = float(self.context.scene.renderman.viewport_render_res_mult)
    else:
        self.viewport_render_res_mult = 1.0

    self.is_xpu = False
    self.num_object_instances = 0
    self.num_objects_in_viewlayer = 0
    self.objects_in_viewlayer.clear()
def export_for_final_render(self, depsgraph, sg_scene, bl_view_layer, is_external=False):
    """Translate the scene for a final (batch) render.

    Args:
        depsgraph (bpy.types.Depsgraph): evaluated dependency graph
        sg_scene (RixSGScene): RenderMan scene graph to populate
        bl_view_layer (bpy.types.ViewLayer): view layer being rendered
        is_external (bool): True when exporting for external (RIB) renders
    """
    self.sg_scene = sg_scene
    self.context = bpy.context
    self.bl_scene = depsgraph.scene_eval
    self.bl_view_layer = bl_view_layer
    self._find_renderman_layer()
    self.depsgraph = depsgraph
    self.external_render = is_external
    self.is_interactive = False
    self.is_viewport_render = False
    self.bl_local_view = False
    self.do_motion_blur = self.bl_scene.renderman.motion_blur

    self.export()
def export_for_bake_render(self, depsgraph, sg_scene, bl_view_layer, is_external=False):
    """Translate the scene for a bake render.

    Dispatches to export_bake_brickmap_selected() when the hider type is
    BAKE_BRICKMAP_SELECTED, otherwise to export_bake_render_scene().
    """
    self.sg_scene = sg_scene
    self.context = bpy.context
    self.bl_scene = depsgraph.scene_eval
    self.bl_view_layer = bl_view_layer
    self._find_renderman_layer()
    self.depsgraph = depsgraph
    self.external_render = is_external
    self.is_interactive = False
    self.is_viewport_render = False
    self.do_motion_blur = self.bl_scene.renderman.motion_blur
    self.rman_bake = True
    self.bl_local_view = False

    if self.bl_scene.renderman.hider_type == 'BAKE_BRICKMAP_SELECTED':
        self.export_bake_brickmap_selected()
    else:
        self.export_bake_render_scene()
def export_for_interactive_render(self, context, depsgraph, sg_scene):
    """Translate the scene for interactive (IPR) rendering.

    Sets is_viewport_render when the user chose to render IPR into
    Blender's viewport; motion blur is always off in interactive mode.
    """
    self.sg_scene = sg_scene
    self.context = context
    self.bl_view_layer = context.view_layer
    self.bl_scene = depsgraph.scene_eval
    self._find_renderman_layer()
    self.bl_local_view = context.space_data.local_view
    self.depsgraph = depsgraph
    self.external_render = False
    self.is_interactive = True
    self.is_viewport_render = False
    self.rman_bake = False

    if self.bl_scene.renderman.render_ipr_into == 'blender':
        self.is_viewport_render = True

    self.do_motion_blur = False

    self.export()
def export_for_rib_selection(self, context, sg_scene):
    """Export only the currently selected objects (for RIB output)."""
    self.reset()
    self.bl_scene = context.scene
    self.bl_local_view = False
    self.bl_frame_current = self.bl_scene.frame_current
    self.sg_scene = sg_scene
    self.context = context
    self.bl_view_layer = context.view_layer
    self._find_renderman_layer()
    self.rman_bake = False

    self.depsgraph = context.evaluated_depsgraph_get()
    self.export_root_sg_node()

    # Materials come from the whole depsgraph, but geometry/instances are
    # restricted to the selection.
    objs = context.selected_objects
    self.export_materials([m for m in self.depsgraph.ids if isinstance(m, bpy.types.Material)])
    self.export_data_blocks(objs)
    self.export_instances(obj_selected=objs)
def export_for_swatch_render(self, depsgraph, sg_scene):
    """Translate a minimal scene for material-preview (swatch) rendering."""
    self.sg_scene = sg_scene
    self.context = bpy.context #None
    self.bl_local_view = False
    self.bl_scene = depsgraph.scene_eval
    self.depsgraph = depsgraph
    self.external_render = False
    self.is_interactive = False
    self.is_viewport_render = False
    self.do_motion_blur = False
    self.rman_bake = False
    self.is_swatch_render = True
    self.export_swatch_render_scene()
def export(self):
    """Translate the full Blender scene into the RenderMan scene graph.

    Order matters here: materials must exist before data blocks reference
    them, cameras before the default light is parented to the main
    camera, and instances are emitted last (with or without motion
    samples depending on do_motion_blur).
    """
    self.reset()

    self.render_default_light = self.bl_scene.renderman.render_default_light

    # XPU is not available on macOS; assumes renderVariant != 'prman'
    # means an XPU variant -- TODO confirm for future variants.
    if sys.platform != "darwin":
        self.is_xpu = (self.bl_scene.renderman.renderVariant != 'prman')

    # update variables
    string_utils.set_var('scene', self.bl_scene.name.replace(' ', '_'))
    string_utils.set_var('layer', self.bl_view_layer.name.replace(' ', '_'))

    self.bl_frame_current = self.bl_scene.frame_current

    rfb_log().debug("Creating root scene graph node")
    self.export_root_sg_node()

    rfb_log().debug("Calling export_materials()")
    #self.export_materials(bpy.data.materials)
    self.export_materials([m for m in self.depsgraph.ids if isinstance(m, bpy.types.Material)])

    # tell the texture manager to start converting any unconverted textures
    # normally textures are converted as they are added to the scene
    rfb_log().debug("Calling txmake_all()")
    texture_utils.get_txmanager().rman_scene = self
    texture_utils.get_txmanager().txmake_all(blocking=True)

    self.scene_any_lights = self._scene_has_lights()

    rfb_log().debug("Calling export_data_blocks()")
    #self.export_data_blocks(bpy.data.objects)
    self.export_data_blocks([x for x in self.depsgraph.ids if isinstance(x, bpy.types.Object)])

    self.export_searchpaths()
    self.export_global_options()
    self.export_hider()
    self.export_integrator()
    self.export_cameras([c for c in self.depsgraph.objects if isinstance(c.data, bpy.types.Camera)])

    # export default light ("headlight") parented to the main camera
    self.export_defaultlight()
    self.main_camera.sg_node.AddChild(self.default_light)

    self.export_displays()
    self.export_samplefilters()
    self.export_displayfilters()

    if self.do_motion_blur:
        rfb_log().debug("Calling export_instances_motion()")
        self.export_instances_motion()
    else:
        rfb_log().debug("Calling export_instances()")
        self.export_instances()
    self.rman_render.stats_mgr.set_export_stats("Finished Export", 1.0)

    # Snapshot instance/object counts so IPR edits can detect
    # additions and deletions later.
    self.num_object_instances = len(self.depsgraph.object_instances)
    self.num_objects_in_viewlayer = len(self.depsgraph.view_layer.objects)
    self.objects_in_viewlayer = [o for o in self.depsgraph.view_layer.objects]

    self.check_solo_light()

    if self.is_interactive:
        self.export_viewport_stats()
    else:
        self.export_stats()
def export_bake_render_scene(self):
    """Export the scene configured for a (pattern/illumination) bake render.

    Similar to export(), but sets bake dicing density on the root node,
    uses bake displays, and forces the output resolution to the square
    bake resolution from the scene's RenderMan settings.
    """
    self.reset()

    # update tokens used for string expansion
    string_utils.set_var('scene', self.bl_scene.name.replace(' ', '_'))
    string_utils.set_var('layer', self.bl_view_layer.name.replace(' ', '_'))

    self.bl_frame_current = self.bl_scene.frame_current

    rfb_log().debug("Creating root scene graph node")
    self.export_root_sg_node()

    rfb_log().debug("Calling export_materials()")
    self.export_materials([m for m in self.depsgraph.ids if isinstance(m, bpy.types.Material)])

    rfb_log().debug("Calling txmake_all()")
    texture_utils.get_txmanager().rman_scene = self
    texture_utils.get_txmanager().txmake_all(blocking=True)

    self.scene_any_lights = self._scene_has_lights()

    rm = self.bl_scene.renderman
    rman_root_sg_node = self.get_root_sg_node()
    attrs = rman_root_sg_node.GetAttributes()
    # dicing density for baking, from the scene's bake settings
    attrs.SetFloat("dice:worlddistancelength", rm.rman_bake_illlum_density)
    rman_root_sg_node.SetAttributes(attrs)

    rfb_log().debug("Calling export_data_blocks()")
    self.export_data_blocks(bpy.data.objects)

    self.export_searchpaths()
    self.export_global_options()
    self.export_hider()
    self.export_integrator()
    self.export_cameras([c for c in self.depsgraph.objects if isinstance(c.data, bpy.types.Camera)])
    self.export_bake_displays()
    self.export_samplefilters()
    self.export_displayfilters()

    if self.do_motion_blur:
        rfb_log().debug("Calling export_instances_motion()")
        self.export_instances_motion()
    else:
        rfb_log().debug("Calling export_instances()")
        self.export_instances()

    # bake renders always use a square resolution
    options = self.sg_scene.GetOptions()
    bake_resolution = int(rm.rman_bake_illlum_res)
    options.SetIntegerArray(self.rman.Tokens.Rix.k_Ri_FormatResolution, (bake_resolution, bake_resolution), 2)
    self.sg_scene.SetOptions(options)
def export_bake_brickmap_selected(self):
    """Export a scene for baking a brickmap/point cloud for the active object.

    Only the active object plus the scene's lights are exported; the
    result is written through a 'pointcloud' display to a .ptc file named
    by the object's bake_filename_attr.
    """
    self.reset()

    # update variables used for string expansion
    string_utils.set_var('scene', self.bl_scene.name.replace(' ', '_'))
    string_utils.set_var('layer', self.bl_view_layer.name.replace(' ', '_'))

    self.bl_frame_current = self.bl_scene.frame_current

    rfb_log().debug("Creating root scene graph node")
    self.export_root_sg_node()

    rfb_log().debug("Calling export_materials()")
    self.export_materials([m for m in self.depsgraph.ids if isinstance(m, bpy.types.Material)])

    rfb_log().debug("Calling txmake_all()")
    texture_utils.get_txmanager().rman_scene = self
    texture_utils.get_txmanager().txmake_all(blocking=True)

    self.scene_any_lights = self._scene_has_lights()

    rm = self.bl_scene.renderman
    rman_root_sg_node = self.get_root_sg_node()
    attrs = rman_root_sg_node.GetAttributes()
    # dicing density for baking, from the scene's bake settings
    attrs.SetFloat("dice:worlddistancelength", rm.rman_bake_illlum_density)
    rman_root_sg_node.SetAttributes(attrs)

    self.export_searchpaths()
    self.export_global_options()
    self.export_hider()
    self.export_integrator()
    self.export_cameras([c for c in self.depsgraph.objects if isinstance(c.data, bpy.types.Camera)])

    # Only export the active object and any lights; materials were already
    # exported above (a second, redundant export_materials() call was removed).
    ob = self.context.active_object
    objects_needed = [x for x in self.bl_scene.objects if object_utils._detect_primitive_(x) == 'LIGHT']
    objects_needed.append(ob)
    self.export_data_blocks(objects_needed)
    self.export_instances()
    self.export_samplefilters()
    self.export_displayfilters()

    # bake renders always use a square resolution
    options = self.sg_scene.GetOptions()
    bake_resolution = int(rm.rman_bake_illlum_res)
    options.SetIntegerArray(self.rman.Tokens.Rix.k_Ri_FormatResolution, (bake_resolution, bake_resolution), 2)
    self.sg_scene.SetOptions(options)

    # Display: write Ci out to a point cloud file
    display_driver = 'pointcloud'
    dspy_chan_Ci = self.rman.SGManager.RixSGDisplayChannel('color', 'Ci')
    self.sg_scene.SetDisplayChannel([dspy_chan_Ci])
    render_output = '%s.ptc' % ob.renderman.bake_filename_attr
    render_output = string_utils.expand_string(render_output)
    display = self.rman.SGManager.RixSGShader("Display", display_driver, render_output)
    display.params.SetString("mode", 'Ci')
    self.main_camera.sg_camera_node.SetDisplay(display)
def export_swatch_render_scene(self):
    """Export the material-preview (swatch) scene.

    Configures a small, fast render: low sample counts from preferences,
    a simple PxrDirectLighting integrator, and the 'blender' display
    driver writing Ci and alpha.
    """
    self.reset()

    # options
    options = self.sg_scene.GetOptions()
    options.SetInteger(self.rman.Tokens.Rix.k_hider_minsamples, get_pref('rman_preview_renders_minSamples', default=0))
    # Bug fix: maxsamples was being read from the minSamples preference.
    options.SetInteger(self.rman.Tokens.Rix.k_hider_maxsamples, get_pref('rman_preview_renders_maxSamples', default=1))
    options.SetInteger(self.rman.Tokens.Rix.k_hider_incremental, 1)
    options.SetString("adaptivemetric", "variance")
    # honor the render resolution percentage
    scale = 100.0 / self.bl_scene.render.resolution_percentage
    w = int(self.bl_scene.render.resolution_x * scale)
    h = int(self.bl_scene.render.resolution_y * scale)
    options.SetIntegerArray(self.rman.Tokens.Rix.k_Ri_FormatResolution, (w, h), 2)
    options.SetFloat(self.rman.Tokens.Rix.k_Ri_PixelVariance, get_pref('rman_preview_renders_pixelVariance', default=0.15))
    # -2 threads: leave two cores free for the UI
    options.SetInteger(self.rman.Tokens.Rix.k_limits_threads, -2)
    options.SetString(self.rman.Tokens.Rix.k_bucket_order, 'horizontal')
    self.sg_scene.SetOptions(options)

    # searchpaths
    self.export_searchpaths()

    # integrator: cheap direct lighting for previews
    integrator_sg = self.rman.SGManager.RixSGShader("Integrator", "PxrDirectLighting", "integrator")
    self.sg_scene.SetIntegrator(integrator_sg)

    # camera
    self.export_cameras([c for c in self.depsgraph.objects if isinstance(c.data, bpy.types.Camera)])

    # Display: render Ci + alpha into Blender's preview buffer
    display_driver = 'blender'
    dspy_chan_Ci = self.rman.SGManager.RixSGDisplayChannel('color', 'Ci')
    dspy_chan_a = self.rman.SGManager.RixSGDisplayChannel('float', 'a')
    self.sg_scene.SetDisplayChannel([dspy_chan_Ci, dspy_chan_a])
    display = self.rman.SGManager.RixSGShader("Display", display_driver, 'blender_preview')
    display.params.SetString("mode", 'Ci,a')
    self.main_camera.sg_camera_node.SetDisplay(display)

    rfb_log().debug("Calling materials()")
    self.export_materials([m for m in self.depsgraph.ids if isinstance(m, bpy.types.Material)])
    rfb_log().debug("Calling export_data_blocks()")
    self.export_data_blocks([m for m in self.depsgraph.ids if isinstance(m, bpy.types.Object)])
    self.export_instances()
def export_root_sg_node(self):
    """Set scene-wide attributes on the scene graph root node.

    Applies every RenderMan property whose config metadata marks it as a
    'riattr', then (if invert_light_linking is on) computes the lighting
    and light-filter subsets from the light-link list.
    """
    rm = self.bl_scene.renderman
    root_sg = self.get_root_sg_node()
    attrs = root_sg.GetAttributes()

    # set any properties marked riattr in the config file
    for prop_name, meta in rm.prop_meta.items():
        if 'riattr' not in meta:
            continue
        val = getattr(rm, prop_name)
        ri_name = meta['riattr']
        is_array = False
        array_len = -1
        if 'arraySize' in meta:
            is_array = True
            array_len = meta['arraySize']
            # array values may be stored as a string like "[1, 2, 3]";
            # NOTE: eval() here is on config/property data, not user input
            if type(val) == str and val.startswith('['):
                val = eval(val)
        param_type = meta['renderman_type']
        property_utils.set_rix_param(attrs, param_type, ri_name, val, is_reference=False, is_array=is_array, array_len=array_len, node=rm)

    if rm.invert_light_linking:
        # Inverted linking: start from all lights/filters and remove each
        # one that appears in a light-link entry; the remainder becomes
        # the default subset. '*' means "all".
        all_lights = [string_utils.sanitize_node_name(l.name) for l in scene_utils.get_all_lights(self.bl_scene, include_light_filters=False)]
        all_lightfilters = [string_utils.sanitize_node_name(l.name) for l in scene_utils.get_all_lightfilters(self.bl_scene)]
        for ll in rm.light_links:
            light_ob = ll.light_ob
            light_nm = string_utils.sanitize_node_name(light_ob.name)
            light_props = shadergraph_utils.get_rman_light_properties_group(light_ob)
            if light_props.renderman_light_role == 'RMAN_LIGHT':
                if light_nm in all_lights:
                    all_lights.remove(light_nm)
            elif light_nm in all_lightfilters:
                all_lightfilters.remove(light_nm)

        if all_lights:
            attrs.SetString(self.rman.Tokens.Rix.k_lighting_subset, ' '. join(all_lights) )
        else:
            attrs.SetString(self.rman.Tokens.Rix.k_lighting_subset, '*')

        if all_lightfilters:
            attrs.SetString(self.rman.Tokens.Rix.k_lightfilter_subset, ' '. join(all_lightfilters) )
        else:
            attrs.SetString(self.rman.Tokens.Rix.k_lightfilter_subset, '*')

    root_sg.SetAttributes(attrs)
def get_root_sg_node(self):
    """Return the root node of the RenderMan scene graph."""
    root = self.sg_scene.Root()
    return root
def export_materials(self, materials):
    """Translate each Blender material into a RenderMan scene graph material.

    Successfully translated materials are cached in self.rman_materials,
    keyed by the material's original datablock.
    """
    translator = self.rman_translators['MATERIAL']
    for material in materials:
        db_name = object_utils.get_db_name(material)
        sg_material = translator.export(material.original, db_name)
        if sg_material:
            self.rman_materials[material.original] = sg_material
def export_data_blocks(self, data_blocks):
    """Export the data block of every exportable object in data_blocks.

    Armatures and cameras are skipped (cameras are handled by
    export_cameras). Progress is reported through the stats manager.

    Args:
        data_blocks: iterable of bpy.types.Object.
    """
    total = len(data_blocks)
    # enumerate from 1 so the progress/log reflects the item just finished
    # (previously reported 0/total on the first item and never total/total)
    for i, obj in enumerate(data_blocks, start=1):
        if obj.type not in ('ARMATURE', 'CAMERA'):
            ob = obj.evaluated_get(self.depsgraph)
            self.export_data_block(ob)
        rfb_log().debug(" Exported %d/%d data blocks... (%s)" % (i, total, obj.name))
        self.rman_render.stats_mgr.set_export_stats("Exporting data blocks", i/total)
def export_data_block(self, db_ob):
    """Export one object's data block into the scene graph.

    Returns the created RmanSgNode, or None when the object is skipped
    (already exported, unsupported type, or a light filter).

    FIXME?
    We currently export a unique geometry/mesh per Object
    This means we're not actually sharing datablocks per Object, even if they are shared
    in Blender. We do this for a couple of reasons:

    1. Each object can have different modifiers applied. This includes applying a subdiv and/or bevel modifiers.
    2. Each object may want a different number of deformation motion samples

    This is incredibly wasteful when these don't apply. We could try and detect this case and
    create a shareable geometry.
    """
    obj = bpy.data.objects.get(db_ob.name, None)
    if not obj:
        # Bug fix: previously a None obj outside of a swatch render fell
        # through to the type comparison below and crashed on obj.type.
        if self.is_swatch_render:
            obj = db_ob
    elif obj.type != db_ob.type:
        obj = db_ob

    if obj and obj.type not in ('ARMATURE', 'CAMERA'):
        ob = obj.evaluated_get(self.depsgraph)
        rman_type = object_utils._detect_primitive_(ob)
        db_name = object_utils.get_db_name(ob, rman_type=rman_type)
        if rman_type == 'LIGHT':
            if ob.data.renderman.renderman_light_role == 'RMAN_LIGHTFILTER':
                # skip if this is a light filter
                # these will be exported when we do regular lights
                return

        translator = self.rman_translators.get(rman_type, None)
        if not translator:
            return

        if ob.original in self.rman_objects:
            # already exported
            return
        rman_sg_node = translator.export(ob, db_name)
        if not rman_sg_node:
            return
        rman_sg_node.rman_type = rman_type
        self.rman_objects[ob.original] = rman_sg_node

        # honor the instancer visibility toggles
        if self.is_interactive and not ob.show_instancer_for_viewport:
            rman_sg_node.sg_node.SetHidden(1)
        elif not ob.show_instancer_for_render:
            rman_sg_node.sg_node.SetHidden(1)

        if rman_type in ['MESH', 'POINTS']:
            # Deal with any particles now. Particles are children to mesh nodes.
            subframes = []
            if self.do_motion_blur:
                subframes = scene_utils._get_subframes_(2, self.bl_scene)
                self.motion_steps.update(subframes)
            if len(ob.particle_systems) > 0:
                particles_group_db = ''
                rman_sg_node.rman_sg_particle_group_node = self.rman_translators['GROUP'].export(None, particles_group_db)
                psys_translator = self.rman_translators['PARTICLES']
                for psys in ob.particle_systems:
                    psys_db_name = '%s' % psys.name
                    rman_sg_particles = psys_translator.export(ob, psys, psys_db_name)
                    if not rman_sg_particles:
                        continue
                    psys_translator.set_motion_steps(rman_sg_particles, subframes)
                    psys_translator.update(ob, psys, rman_sg_particles)
                    ob_psys = self.rman_particles.get(ob.original, dict())
                    ob_psys[psys.settings.original] = rman_sg_particles
                    self.rman_particles[ob.original] = ob_psys
                    self.rman_objects[psys.settings.original] = rman_sg_particles
                    self.processed_obs.append(psys.settings.original)
                    rman_sg_node.rman_sg_particle_group_node.sg_node.AddChild(rman_sg_particles.sg_node)
        elif rman_type == 'EMPTY' and (ob.hide_render or ob.hide_viewport):
            # Make sure empties that are hidden still go out. Children
            # could still be visible
            self._export_hidden_instance(ob, rman_sg_node)
            return rman_sg_node

        # motion blur
        # we set motion steps for this object, even if it's not moving
        # it could be moving as part of a particle system
        mb_segs = -1
        mb_deform_segs = -1
        if self.do_motion_blur:
            mb_segs = self.bl_scene.renderman.motion_segments
            mb_deform_segs = self.bl_scene.renderman.deform_motion_segments
            if ob.renderman.motion_segments_override:
                mb_segs = ob.renderman.motion_segments
            if mb_segs > 1:
                subframes = scene_utils._get_subframes_(mb_segs, self.bl_scene)
                rman_sg_node.motion_steps = subframes
                self.motion_steps.update(subframes)
            if ob.renderman.motion_segments_override:
                mb_deform_segs = ob.renderman.deform_motion_segments
            if mb_deform_segs > 1:
                subframes = scene_utils._get_subframes_(mb_deform_segs, self.bl_scene)
                rman_sg_node.deform_motion_steps = subframes
                self.motion_steps.update(subframes)
            if rman_sg_node.is_transforming or rman_sg_node.is_deforming:
                if mb_segs > 1 or mb_deform_segs > 1:
                    self.moving_objects[ob.name_full] = ob
        # with motion blur off (segs stay -1) clear the motion flags
        if mb_segs < 1:
            rman_sg_node.is_transforming = False
        if mb_deform_segs < 1:
            rman_sg_node.is_deforming = False

        # Bug fix: callers (e.g. _export_hidden_instance) use the return
        # value; previously only the hidden-empty path returned the node.
        return rman_sg_node
def export_defaultlight(self):
    """Create (once) and show/hide the fallback headlight.

    The light is a PxrDistantLight oriented down the camera axis; it is
    only visible when the user wants a default light and the scene has
    no lights of its own.
    """
    if not self.default_light:
        self.default_light = self.sg_scene.CreateAnalyticLight('__defaultlight')
        shader = self.rman.SGManager.RixSGShader("Light", 'PxrDistantLight', "light")
        self.default_light.SetLight(shader)
        # orientation matrix pointing the distant light forward
        orient = [-1.0, 0.0, -0.0, 0.0,
                  -0.0, -1.0, -0.0, 0.0,
                  0.0, 0.0, -1.0, 0.0,
                  0.0, 0.0, 0.0, 1.0]
        self.default_light.SetOrientTransform(orient)

    show = self.render_default_light and not self.scene_any_lights
    self.default_light.SetHidden(0 if show else 1)
def _scene_has_lights(self):
    """Return True if the scene contains at least one light (filters excluded)."""
    lights = scene_utils.get_all_lights(self.bl_scene, include_light_filters=False)
    return len(lights) != 0
def _export_hidden_instance(self, ob, rman_sg_node):
    """Export a hidden empty so its (possibly visible) children still work.

    Attaches attributes/material, parents the node under its empty parent
    (exporting that parent on demand) or the scene root, applies the
    transform, and registers it as a coordinate system if requested.
    """
    translator = self.rman_translators.get('EMPTY')
    translator.export_object_attributes(ob, rman_sg_node)
    self.attach_material(ob, rman_sg_node)
    if ob.parent and object_utils._detect_primitive_(ob.parent) == 'EMPTY':
        rman_empty_node = self.rman_objects.get(ob.parent.original)
        if not rman_empty_node:
            # Empty was not created. Export it.
            parent = ob.parent
            rman_empty_node = self.export_data_block(parent)
        rman_empty_node.sg_node.AddChild(rman_sg_node.sg_node)
    else:
        self.get_root_sg_node().AddChild(rman_sg_node.sg_node)
    translator.export_transform(ob, rman_sg_node.sg_node)
    if ob.renderman.export_as_coordsys:
        self.get_root_sg_node().AddCoordinateSystem(rman_sg_node.sg_node)
def _export_instance(self, ob_inst, seg=None):
    """Export one depsgraph object instance as a scene graph group.

    Creates a GROUP node for the instance, parents the object's geometry
    node under it, attaches attributes/material, and sets the instance
    transform (or the first motion sample when seg is given).

    Args:
        ob_inst: DepsgraphObjectInstance to export.
        seg: normalized motion sample time for the first motion sample,
            or None outside of motion-blur exports.
    """
    group_db_name = object_utils.get_group_db_name(ob_inst)
    rman_group_translator = self.rman_translators['GROUP']
    parent_sg_node = None
    rman_sg_particles = None
    psys = None
    parent = None
    if ob_inst.is_instance:
        parent = ob_inst.parent
        ob = ob_inst.instance_object
        psys = ob_inst.particle_system
        if psys:
            # This object was instanced as part of a particle system. Add the object
            # to particle system's owner' objects_instanced set.
            parent_sg_node = self.rman_objects.get(parent.original, None)
            if parent_sg_node:
                parent_sg_node.objects_instanced.add(ob.original)
        else:
            #if parent.type == "EMPTY" and parent.is_instancer:
            if parent.is_instancer:
                # make sure the instancer parent has a group node we can refer to
                parent_db_name = object_utils.get_db_name(parent)
                parent_sg_node = self.rman_objects.get(parent.original, None)
                if not parent_sg_node:
                    parent_sg_node = rman_group_translator.export(parent, parent_db_name)
                    self.rman_objects[parent.original] = parent_sg_node
    else:
        ob = ob_inst.object

    if ob.type in ('ARMATURE', 'CAMERA'):
        return

    rman_type = object_utils._detect_primitive_(ob)
    if rman_type == 'LIGHTFILTER':
        # light filters are part of lights, so when light instances
        # are exported, light filters should go along with them
        return
    elif ob.type == "EMPTY" and ob.is_instancer:
        # instancer empty: just make sure its group node exists
        rman_sg_node = self.rman_objects.get(ob.original, None)
        if not rman_sg_node:
            empty_db_name = object_utils.get_db_name(ob)
            rman_sg_node = rman_group_translator.export(ob, empty_db_name)
            self.rman_objects[ob.original] = rman_sg_node
    else:
        if rman_type == 'EMPTY':
            # this is just a regular empty object.
            rman_sg_node = self.rman_objects.get(ob.original, None)
            if rman_sg_node:
                self._export_hidden_instance(ob, rman_sg_node)
            return

        if rman_type == "META":
            # only add the meta instance that matches the family name
            if ob.name_full != object_utils.get_meta_family(ob):
                return

        rman_sg_node = self.rman_objects.get(ob.original, None)
        if not rman_sg_node:
            return

        translator = self.rman_translators.get(rman_type, None)
        if not translator:
            return

        if group_db_name in rman_sg_node.instances:
            # we've already added this instance
            return
        else:
            # update the shared geometry once, on first instance
            if not ob.original in self.processed_obs:
                translator.update(ob, rman_sg_node)
                translator.export_object_primvars(ob, rman_sg_node)
                self.processed_obs.append(ob.original)

        rman_sg_group = rman_group_translator.export(ob, group_db_name)
        if ob.is_instancer and ob.instance_type != 'NONE':
            rman_sg_group.is_instancer = ob.is_instancer
        if rman_sg_node.sg_node is None:
            # add the group to the root anyways
            db_name = object_utils.get_db_name(ob, rman_type=rman_type)
            rman_sg_group.db_name = db_name
            self.get_root_sg_node().AddChild(rman_sg_group.sg_node)
            self.rman_objects[ob.original] = rman_sg_group
            return

        rman_sg_group.sg_node.AddChild(rman_sg_node.sg_node)
        rman_sg_group.rman_sg_node_instance = rman_sg_node
        if rman_sg_node.rman_sg_particle_group_node:
            # particles ride along with the mesh group when shown
            if (len(ob.particle_systems) > 0) and ob_inst.show_particles:
                rman_sg_group.sg_node.AddChild(rman_sg_node.rman_sg_particle_group_node.sg_node)

        if ob.parent and object_utils._detect_primitive_(ob.parent) == 'EMPTY':
            # this object is a child of an empty. Add it to the empty.
            rman_empty_node = self.rman_objects.get(ob.parent.original)
            rman_sg_group.sg_node.SetInheritTransform(False) # we don't want to inherit the transform
            rman_empty_node.sg_node.AddChild(rman_sg_group.sg_node)
        else:
            self.get_root_sg_node().AddChild(rman_sg_group.sg_node)

        # add this instance to rman_sg_node
        rman_sg_node.instances[group_db_name] = rman_sg_group

        # object attrs
        translator.export_object_attributes(ob, rman_sg_group)
        translator.export_object_id(ob, rman_sg_group, ob_inst)

        # attach material
        if psys:
            self.attach_particle_material(psys.settings, parent, ob, rman_sg_group)
            rman_sg_group.bl_psys_settings = psys.settings.original
        else:
            self.attach_material(ob, rman_sg_group)

        # check local view
        if self.is_interactive:
            if parent:
                if not parent.visible_in_viewport_get(self.context.space_data):
                    rman_sg_group.sg_node.SetHidden(1)
                else:
                    # -1 means "inherit the hidden state"
                    rman_sg_group.sg_node.SetHidden(-1)
            else:
                if not ob.visible_in_viewport_get(self.context.space_data):
                    rman_sg_group.sg_node.SetHidden(1)
                else:
                    rman_sg_group.sg_node.SetHidden(-1)

        if rman_type == "META":
            # meta/blobbies are already in world space. Their instances don't need to
            # set a transform.
            return

        if rman_sg_node.is_transforming:
            rman_group_translator.update_transform_num_samples(rman_sg_group, rman_sg_node.motion_steps )
            rman_group_translator.update_transform_sample(ob_inst, rman_sg_group, 0, seg )
        elif psys and self.do_motion_blur:
            rman_group_translator.update_transform_num_samples(rman_sg_group, rman_sg_node.motion_steps )
            rman_group_translator.update_transform_sample(ob_inst, rman_sg_group, 0, seg )
        else:
            rman_group_translator.update_transform(ob_inst, rman_sg_group)
def export_instances(self, obj_selected=None):
    """Export every object instance in the depsgraph.

    Args:
        obj_selected: optional list of objects; when given, only instances
            of (or instanced from) those objects are exported.
    """
    total = len(self.depsgraph.object_instances)
    obj_selected_names = []
    if obj_selected:
        obj_selected_names = [o.name for o in obj_selected]
    # enumerate from 1 so the progress/log reflects the item just finished
    # (previously reported 0/total first and never reached total/total)
    for i, ob_inst in enumerate(self.depsgraph.object_instances, start=1):
        if obj_selected:
            objFound = False
            if ob_inst.is_instance:
                if ob_inst.instance_object.name in obj_selected_names:
                    objFound = True
            elif ob_inst.object.name in obj_selected_names:
                objFound = True
            if not objFound:
                continue

        self._export_instance(ob_inst)
        self.rman_render.stats_mgr.set_export_stats("Exporting instances", i/total)
        rfb_log().debug(" Exported %d/%d instances..." % (i, total))
def attach_material(self, ob, rman_sg_node):
    """Bind the active material of ob (if exported) to rman_sg_node."""
    mat = object_utils.get_active_material(ob)
    if not mat:
        return
    sg_material = self.rman_materials.get(mat.original, None)
    if not sg_material or not sg_material.sg_node:
        return
    scenegraph_utils.set_material(rman_sg_node.sg_node, sg_material.sg_node)
    rman_sg_node.is_meshlight = sg_material.has_meshlight
def attach_particle_material(self, psys_settings, parent, ob, group):
    """Attach the correct material for a particle-instancing system.

    This function should only be used by particle instancing.
    For emitters and hair, the material attachment is done in either
    the emitter translator or hair translator directly.

    Args:
        psys_settings: bpy.types.ParticleSettings of the system.
        parent: the emitter object (owner of the particle system).
        ob: the instanced object.
        group: RmanSgGroup node for the instance.
    """
    if not object_utils.is_particle_instancer(psys=None, particle_settings=psys_settings):
        return
    if psys_settings.renderman.override_instance_material:
        mat_idx = psys_settings.material - 1
        if mat_idx < len(parent.material_slots):
            mat = parent.material_slots[mat_idx].material
            if not mat:
                # Bug fix: the slot exists but has no material assigned;
                # previously this crashed on mat.original.
                return
            rman_sg_material = self.rman_materials.get(mat.original, None)
            # also require sg_node, matching the non-override branch below
            if rman_sg_material and rman_sg_material.sg_node:
                scenegraph_utils.set_material(group.sg_node, rman_sg_material.sg_node)
    else:
        mat = object_utils.get_active_material(ob)
        if mat:
            rman_sg_material = self.rman_materials.get(mat.original, None)
            if rman_sg_material and rman_sg_material.sg_node:
                scenegraph_utils.set_material(group.sg_node, rman_sg_material.sg_node)
                group.is_meshlight = rman_sg_material.has_meshlight
def export_instances_motion(self, obj_selected=None):
    """Export all instances with transform/deformation motion samples.

    Steps the Blender frame through every collected motion step; at the
    first step instances are created via _export_instance(), subsequent
    steps only add transform/deform samples. The frame is restored at
    the end.

    Args:
        obj_selected: optional single object; when given, only that
            object's instance is exported.
    """
    origframe = self.bl_scene.frame_current

    motion_steps = sorted(list(self.motion_steps))
    first_sample = False
    # offset so the earliest (most negative) step maps to time sample 0.0
    delta = -motion_steps[0]
    for samp, seg in enumerate(motion_steps):
        first_sample = (samp == 0)
        if seg < 0.0:
            self.rman_render.bl_engine.frame_set(origframe - 1, subframe=1.0 + seg)
        else:
            self.rman_render.bl_engine.frame_set(origframe, subframe=seg)

        self.depsgraph.update()
        time_samp = seg + delta # get the normalized version of the segment
        total = len(self.depsgraph.object_instances)
        objFound = False

        # update camera transform for this sample
        if not first_sample and self.main_camera.is_transforming and seg in self.main_camera.motion_steps:
            cam_translator = self.rman_translators['CAMERA']
            idx = 0
            for i, s in enumerate(self.main_camera.motion_steps):
                if s == seg:
                    idx = i
                    break
            cam_translator.update_transform(self.depsgraph.scene_eval.camera, self.main_camera, idx, time_samp)

        for i, ob_inst in enumerate(self.depsgraph.object_instances):
            if obj_selected:
                if objFound:
                    break

                # NOTE(review): obj_selected is compared as a plain string for
                # instances but via .name otherwise — confirm the intended type.
                if ob_inst.is_instance:
                    if ob_inst.instance_object.name == obj_selected:
                        objFound = True
                elif ob_inst.object.name == obj_selected.name:
                    objFound = True

                if not objFound:
                    continue

            if not ob_inst.show_self:
                continue

            if first_sample:
                # for the first motion sample use _export_instance()
                self._export_instance(ob_inst, seg=time_samp)
                self.rman_render.stats_mgr.set_export_stats("Exporting instances (%f)" % seg, i/total)
                continue

            rman_group_translator = self.rman_translators['GROUP']
            psys = None
            if ob_inst.is_instance:
                ob = ob_inst.instance_object.original
                psys = ob_inst.particle_system
            else:
                ob = ob_inst.object

            if ob.name_full not in self.moving_objects and not psys:
                continue

            if ob.type not in ['MESH']:
                continue

            group_db_name = object_utils.get_group_db_name(ob_inst)
            rman_sg_node = self.rman_objects.get(ob.original, None)
            if not rman_sg_node:
                continue

            if not seg in rman_sg_node.motion_steps:
                continue

            idx = 0
            for i, s in enumerate(rman_sg_node.motion_steps):
                if s == seg:
                    idx = i
                    break

            if rman_sg_node.is_transforming or psys:
                rman_sg_group = rman_sg_node.instances.get(group_db_name, None)
                if rman_sg_group:
                    rman_group_translator.update_transform_num_samples(rman_sg_group, rman_sg_node.motion_steps ) # should have been set in _export_instances()
                    rman_group_translator.update_transform_sample( ob_inst, rman_sg_group, idx, time_samp)
            self.rman_render.stats_mgr.set_export_stats("Exporting instances (%f)" % seg, i/total)

        # deformation samples: particles and deforming geometry
        for ob_original, rman_sg_node in self.rman_objects.items():
            ob = ob_original.evaluated_get(self.depsgraph)
            psys_translator = self.rman_translators['PARTICLES']
            particle_systems = getattr(ob, 'particle_systems', list())
            for psys in particle_systems:
                ob_psys = self.rman_particles.get(ob.original, dict())
                rman_sg_particles = ob_psys.get(psys.settings.original, None)
                if rman_sg_particles:
                    if not seg in rman_sg_particles.motion_steps:
                        continue
                    idx = 0
                    # Bug fix: the index must come from the particles' own
                    # motion steps (was enumerating rman_sg_node.motion_steps,
                    # which can differ and yield the wrong sample index).
                    for i, s in enumerate(rman_sg_particles.motion_steps):
                        if s == seg:
                            idx = i
                            break
                    psys_translator.export_deform_sample(rman_sg_particles, ob, psys, idx)

            if rman_sg_node.is_deforming and seg in rman_sg_node.deform_motion_steps:
                rman_type = rman_sg_node.rman_type
                if rman_type in ['MESH', 'FLUID']:
                    translator = self.rman_translators.get(rman_type, None)
                    if translator:
                        idx = 0
                        for i, s in enumerate(rman_sg_node.deform_motion_steps):
                            if s == seg:
                                idx = i
                                break
                        translator.export_deform_sample(rman_sg_node, ob, idx)

    self.rman_render.bl_engine.frame_set(origframe, subframe=0)
def check_light_local_view(self, ob, rman_sg_node):
    """Hide rman_sg_node when ob is not visible in the interactive viewport.

    Returns True if the node was hidden, False otherwise.
    """
    if not (self.is_interactive and self.context.space_data):
        return False
    if ob.visible_in_viewport_get(self.context.space_data):
        return False
    rman_sg_node.sg_node.SetHidden(1)
    return True
def check_solo_light(self):
    """Sync light visibility with the solo/mute flags.

    When a solo light is set, hide every light except the solo one.
    Otherwise apply each light's mute flag, plus (when interactive)
    Blender's hide state and local-view visibility.
    """
    if self.bl_scene.renderman.solo_light:
        for light_ob in scene_utils.get_all_lights(self.bl_scene, include_light_filters=False):
            rman_sg_node = self.rman_objects.get(light_ob.original, None)
            if not rman_sg_node:
                continue
            rm = light_ob.renderman
            if not rm:
                continue
            if rm.solo:
                rman_sg_node.sg_node.SetHidden(0)
            else:
                rman_sg_node.sg_node.SetHidden(1)
    else:
        for light_ob in scene_utils.get_all_lights(self.bl_scene, include_light_filters=False):
            rman_sg_node = self.rman_objects.get(light_ob.original, None)
            if not rman_sg_node:
                continue
            rm = light_ob.renderman
            if not rm:
                continue
            # Bug fix: this was `return`, which aborted processing for all
            # remaining lights as soon as one was hidden by local view.
            if self.check_light_local_view(light_ob, rman_sg_node):
                continue
            if self.is_interactive:
                if not light_ob.hide_get():
                    rman_sg_node.sg_node.SetHidden(rm.mute)
                else:
                    rman_sg_node.sg_node.SetHidden(1)
            else:
                rman_sg_node.sg_node.SetHidden(rm.mute)
def export_searchpaths(self):
    """Set the shader/texture/plugin/display searchpath options.

    Combines the bundled Cycles shader directory with the RMAN_* env
    vars from envconfig(). '.' and '@' (the built-in default path) are
    kept at the ends of each path list.
    """
    # TODO
    # RMAN_ARCHIVEPATH,
    # RMAN_DISPLAYPATH, RMAN_PROCEDURALPATH, and RMAN_DSOPATH (combines procedurals and displays)

    # get cycles shader directory
    cycles_shader_dir = filepath_utils.get_cycles_shader_path()
    RMAN_SHADERPATH = envconfig().getenv('RMAN_SHADERPATH', '')
    RMAN_TEXTUREPATH = envconfig().getenv('RMAN_TEXTUREPATH', '')
    RMAN_RIXPLUGINPATH = envconfig().getenv('RMAN_RIXPLUGINPATH', '')
    if sys.platform == ("win32"):
        # substitute ; for : in paths
        # NOTE(review): a blanket ';'->':' replace also touches drive-letter
        # colons already in the path elements — confirm this is intended.
        RMAN_SHADERPATH = RMAN_SHADERPATH.replace(';', ':')
        RMAN_TEXTUREPATH = RMAN_TEXTUREPATH.replace(';', ':')
        RMAN_RIXPLUGINPATH = RMAN_RIXPLUGINPATH.replace(';', ':')

    options = self.sg_scene.GetOptions()
    options.SetString(self.rman.Tokens.Rix.k_searchpath_shader, '.:%s:%s:@' % (cycles_shader_dir, RMAN_SHADERPATH))
    options.SetString(self.rman.Tokens.Rix.k_searchpath_texture, '.:%s:@' % RMAN_TEXTUREPATH)
    options.SetString(self.rman.Tokens.Rix.k_searchpath_rixplugin, '.:%s:@' % RMAN_RIXPLUGINPATH)
    options.SetString(self.rman.Tokens.Rix.k_searchpath_display, '.:@')
    self.sg_scene.SetOptions(options)
def export_hider(self):
    """Configure the hider options (sampling, bake mode, pixel variance).

    In bake mode the bake hider and its primvar options are set; otherwise
    min/max samples, incremental rendering, and pixel variance are applied
    (with IPR overrides when interactive).
    """
    options = self.sg_scene.GetOptions()
    rm = self.bl_scene.renderman
    if self.rman_bake:
        options.SetString(self.rman.Tokens.Rix.k_hider_type, self.rman.Tokens.Rix.k_bake)
        bakemode = rm.rman_bake_mode.lower()
        # fall back to the conventional 's'/'t' primvar names when unset
        primvar_s = rm.rman_bake_illum_primvarS
        if primvar_s == '':
            primvar_s = 's'
        primvar_t = rm.rman_bake_illum_primvarT
        if primvar_t == '':
            primvar_t = 't'
        invert_t = rm.rman_bake_illum_invertT
        options.SetString(self.rman.Tokens.Rix.k_hider_bakemode, bakemode)
        options.SetStringArray(self.rman.Tokens.Rix.k_hider_primvar, (primvar_s, primvar_t), 2)
        options.SetInteger(self.rman.Tokens.Rix.k_hider_invert, invert_t)
    else:
        pv = rm.ri_pixelVariance

        options.SetInteger(self.rman.Tokens.Rix.k_hider_minsamples, rm.hider_minSamples)
        options.SetInteger(self.rman.Tokens.Rix.k_hider_maxsamples, rm.hider_maxSamples)
        options.SetInteger(self.rman.Tokens.Rix.k_hider_incremental, rm.hider_incremental)

        # IPR overrides: the interactive settings replace the batch ones
        if self.is_interactive:
            options.SetInteger(self.rman.Tokens.Rix.k_hider_decidither, rm.hider_decidither)
            options.SetInteger(self.rman.Tokens.Rix.k_hider_minsamples, rm.ipr_hider_minSamples)
            options.SetInteger(self.rman.Tokens.Rix.k_hider_maxsamples, rm.ipr_hider_maxSamples)
            options.SetInteger(self.rman.Tokens.Rix.k_hider_incremental, 1)
            pv = rm.ipr_ri_pixelVariance

        # rendering into Blender or checkpointing requires incremental mode
        if (not self.external_render and rm.render_into == 'blender') or rm.enable_checkpoint:
            options.SetInteger(self.rman.Tokens.Rix.k_hider_incremental, 1)

        if not rm.sample_motion_blur:
            options.SetInteger(self.rman.Tokens.Rix.k_hider_samplemotion, 0)

        options.SetFloat(self.rman.Tokens.Rix.k_Ri_PixelVariance, pv)

        # use importance pixel filtering when any display wants denoising
        dspys_dict = display_utils.get_dspy_dict(self)
        anyDenoise = False
        for dspy,params in dspys_dict['displays'].items():
            if params['denoise']:
                anyDenoise = True
                break
        if anyDenoise:
            options.SetString(self.rman.Tokens.Rix.k_hider_pixelfiltermode, 'importance')

    self.sg_scene.SetOptions(options)
def export_global_options(self):
    """Set scene-wide render options (Ri options) on the scene graph.

    Applies config-driven 'riopt' properties, then threads, pixel filter,
    checkpointing, frame number, bucket order, shutter, dirmaps, and OCIO
    color-management settings.
    """
    rm = self.bl_scene.renderman
    options = self.sg_scene.GetOptions()

    # set any properties marked riopt in the config file
    for prop_name, meta in rm.prop_meta.items():
        if 'riopt' not in meta:
            continue
        val = getattr(rm, prop_name)
        ri_name = meta['riopt']
        is_array = False
        array_len = -1
        if 'arraySize' in meta:
            is_array = True
            array_len = meta['arraySize']
            # array values may be stored as a string like "[1, 2, 3]";
            # NOTE: eval() here is on config/property data, not user input
            if type(val) == str and val.startswith('['):
                val = eval(val)
        param_type = meta['renderman_type']
        if param_type == "string":
            val = string_utils.expand_string(val, asFilePath=True)
        property_utils.set_rix_param(options, param_type, ri_name, val, is_reference=False, is_array=is_array, array_len=array_len, node=rm)

    # threads
    if not self.external_render:
        options.SetInteger(self.rman.Tokens.Rix.k_limits_threads, rm.threads)

    # pixelfilter
    options.SetString(self.rman.Tokens.Rix.k_Ri_PixelFilterName, rm.ri_displayFilter)
    options.SetFloatArray(self.rman.Tokens.Rix.k_Ri_PixelFilterWidth, (rm.ri_displayFilterSize[0], rm.ri_displayFilterSize[1]), 2)

    # checkpointing
    if not self.is_interactive and rm.enable_checkpoint:
        if rm.checkpoint_interval != '':
            interval_tokens = rm.checkpoint_interval.split()
            if len(interval_tokens) > 0:
                options.SetStringArray(self.rman.Tokens.Rix.k_checkpoint_interval, interval_tokens, len(interval_tokens) )
        if rm.checkpoint_exitat != '':
            exitat_tokens = rm.checkpoint_exitat.split()
            if len(exitat_tokens) > 0:
                # NOTE(review): this writes k_checkpoint_interval again,
                # clobbering the interval set above; k_checkpoint_exitat
                # looks intended — confirm against the Rix token list.
                options.SetStringArray(self.rman.Tokens.Rix.k_checkpoint_interval, exitat_tokens, len(exitat_tokens) )
        options.SetInteger(self.rman.Tokens.Rix.k_checkpoint_asfinal, int(rm.checkpoint_asfinal))

    # Set frame number
    options.SetInteger(self.rman.Tokens.Rix.k_Ri_Frame, self.bl_scene.frame_current)

    # Always turn off xml stats when in interactive
    if self.is_interactive:
        options.SetInteger(self.rman.Tokens.Rix.k_statistics_level, 0)

    # Set bucket shape (checkpointing forces horizontal order)
    bucket_order = rm.opt_bucket_order.lower()
    bucket_orderorigin = []
    if rm.enable_checkpoint and not self.is_interactive:
        bucket_order = 'horizontal'
    elif rm.opt_bucket_order == 'spiral':
        settings = self.bl_scene.render
        # -1 in either component means "center of that axis"
        if rm.opt_bucket_sprial_x <= settings.resolution_x and rm.opt_bucket_sprial_y <= settings.resolution_y:
            if rm.opt_bucket_sprial_x == -1:
                halfX = settings.resolution_x / 2
                bucket_orderorigin = [int(halfX), rm.opt_bucket_sprial_y]
            elif rm.opt_bucket_sprial_y == -1:
                halfY = settings.resolution_y / 2
                # NOTE(review): the x component uses opt_bucket_sprial_y here;
                # opt_bucket_sprial_x looks intended — confirm.
                bucket_orderorigin = [rm.opt_bucket_sprial_y, int(halfY)]
            else:
                bucket_orderorigin = [rm.opt_bucket_sprial_x, rm.opt_bucket_sprial_y]
    options.SetString(self.rman.Tokens.Rix.k_bucket_order, bucket_order)
    if bucket_orderorigin:
        options.SetFloatArray(self.rman.Tokens.Rix.k_bucket_orderorigin, bucket_orderorigin, 2)

    # Shutter
    if rm.motion_blur:
        shutter_interval = rm.shutter_angle / 360.0
        '''
        if rm.shutter_timing == 'FRAME_CENTER':
            shutter_open, shutter_close = 0 - .5 * \
                shutter_interval, 0 + .5 * shutter_interval
        elif rm.shutter_timing == 'FRAME_CLOSE':
            shutter_open, shutter_close = 0 - shutter_interval, 0
        elif rm.shutter_timing == 'FRAME_OPEN':
            shutter_open, shutter_close = 0, shutter_interval
        '''
        # shutter always opens at frame time (FRAME_OPEN behavior)
        shutter_open, shutter_close = 0, shutter_interval
        options.SetFloatArray(self.rman.Tokens.Rix.k_Ri_Shutter, (shutter_open, shutter_close), 2)

    # dirmaps: concatenate every configured dirmap entry
    dirmaps = ''
    for k in rfb_config['dirmaps']:
        dirmap = rfb_config['dirmaps'][k]
        d = "[ \"%s\" \"%s\" \"%s\"]" % (dirmap['zone'], dirmap['from'], dirmap['to'])
        dirmaps += d
    if dirmaps:
        options.SetString('searchpath:dirmap', dirmaps)

    # colorspace (OCIO config from Blender's color management)
    ocioconfig = color_manager_blender.get_config_path()
    ociocolorspacename = color_manager_blender.get_colorspace_name()
    options.SetString('user:ocioconfigpath', ocioconfig)
    options.SetString('user:ociocolorspacename', ociocolorspacename)

    self.sg_scene.SetOptions(options)
def export_integrator(self):
    """Translate the world's integrator node into the RixSG scene.

    If the world has a RenderMan integrator node, its properties are
    copied onto the Rix shader; otherwise a default PxrPathTracer is
    used. (Removed unused local `rm = world.renderman`.)
    """
    world = self.bl_scene.world
    bl_integrator_node = shadergraph_utils.find_integrator_node(world)
    if bl_integrator_node:
        integrator_sg = self.rman.SGManager.RixSGShader("Integrator", bl_integrator_node.bl_label, "integrator")
        rman_sg_node = RmanSgNode(self, integrator_sg, "")
        # copy the Blender node's property group onto the Rix shader params
        property_utils.property_group_to_rixparams(bl_integrator_node, rman_sg_node, integrator_sg, ob=world)
    else:
        integrator_sg = self.rman.SGManager.RixSGShader("Integrator", "PxrPathTracer", "integrator")
    self.sg_scene.SetIntegrator(integrator_sg)
def export_cameras(self, bl_cameras):
    """Export the main render camera plus all other scene cameras.

    Viewport renders get a synthetic 'main_camera'; otherwise the scene's
    active camera (or, for interactive sessions, the viewport camera) is
    exported and registered in self.rman_cameras / self.rman_objects.
    """
    main_cam = self.depsgraph.scene_eval.camera
    cam_translator = self.rman_translators['CAMERA']
    if self.is_viewport_render:
        db_name = 'main_camera'
        self.main_camera = cam_translator.export(None, db_name)
        self.main_camera.sg_camera_node.SetRenderable(1)
        self.sg_scene.Root().AddChild(self.main_camera.sg_node)
        # add camera so we don't mistake it for a new obj
        if main_cam:
            self.rman_cameras[main_cam.original] = self.main_camera
            self.rman_objects[main_cam.original] = self.main_camera
            self.processed_obs.append(main_cam.original)
    else:
        if self.is_interactive:
            # interactive (non-viewport) sessions render through the 3D view's camera
            main_cam = self.context.space_data.camera
        db_name = object_utils.get_db_name(main_cam)
        rman_sg_camera = cam_translator.export(main_cam, db_name)
        self.main_camera = rman_sg_camera
        if main_cam:
            self.rman_cameras[main_cam.original] = rman_sg_camera
            self.rman_objects[main_cam.original] = rman_sg_camera
        # resolution
        cam_translator._update_render_resolution(main_cam, self.main_camera)
        self.sg_scene.Root().AddChild(rman_sg_camera.sg_node)
    # export all other scene cameras
    for cam in bl_cameras:
        ob = cam.original
        if cam.original in self.rman_cameras:
            continue
        if cam == main_cam:
            # main camera already exported; just record its motion steps
            if self.main_camera.is_transforming:
                self.motion_steps.update(self.main_camera.motion_steps)
            continue
        db_name = object_utils.get_db_name(ob)
        rman_sg_camera = cam_translator._export_render_cam(ob, db_name)
        self.rman_cameras[cam.original] = rman_sg_camera
        self.rman_objects[cam.original] = rman_sg_camera
        self.sg_scene.Root().AddChild(rman_sg_camera.sg_node)
        self.sg_scene.Root().AddCoordinateSystem(rman_sg_camera.sg_node)
    # For now, make the main camera the 'primary' dicing camera
    self.main_camera.sg_camera_node.SetRenderable(1)
    self.sg_scene.Root().AddCoordinateSystem(self.main_camera.sg_node)
def export_displayfilters(self):
    """Export the world's display filter nodes to the scene graph.

    With no RendermanDisplayfiltersOutputNode in the world, a default
    PxrBackgroundDisplayFilter using the world color is set instead.
    Multiple filters are chained through a PxrDisplayFilterCombiner.
    """
    rm = self.bl_scene.renderman
    display_filter_names = []
    displayfilters_list = []
    world = self.bl_scene.world
    output = shadergraph_utils.find_node(world, 'RendermanDisplayfiltersOutputNode')
    if not output:
        # put in a default background color, using world color, then bail
        if not self.world_df_node:
            self.world_df_node = self.rman.SGManager.RixSGShader("DisplayFilter", "PxrBackgroundDisplayFilter", "__rman_world_df")
        params = self.world_df_node.params
        params.SetColor("backgroundColor", self.bl_scene.world.color[:3])
        self.sg_scene.SetDisplayFilter([self.world_df_node])
        return
    for bl_df_node in shadergraph_utils.find_displayfilter_nodes(world):
        if not bl_df_node.is_active:
            continue
        # don't emit stylized filters, if render_rman_stylized is false
        if bl_df_node.bl_label in rman_constants.RMAN_STYLIZED_FILTERS and not rm.render_rman_stylized:
            continue
        df_name = bl_df_node.name
        rman_df_node = self.rman.SGManager.RixSGShader("DisplayFilter", bl_df_node.bl_label, df_name)
        rman_sg_node = RmanSgNode(self, rman_df_node, "")
        property_utils.property_group_to_rixparams(bl_df_node, rman_sg_node, rman_df_node, ob=world)
        display_filter_names.append(df_name)
        displayfilters_list.append(rman_df_node)
    if len(display_filter_names) > 1:
        # more than one filter: chain them through a combiner node
        df_name = "rman_displayfilter_combiner"
        df_node = self.rman.SGManager.RixSGShader("DisplayFilter", "PxrDisplayFilterCombiner", df_name)
        params = df_node.params
        params.SetDisplayFilterReferenceArray("filter", display_filter_names, len(display_filter_names))
        displayfilters_list.append(df_node)
    self.sg_scene.SetDisplayFilter(displayfilters_list)
def export_samplefilters(self, sel_chan_name=None):
    """Export sample filters: optional holdout-matte shadow filter, the
    world's sample filter nodes, and (for viewport channel selection) a
    PxrCopyAOVSampleFilter reading `sel_chan_name`. Multiple filters are
    chained through a PxrSampleFilterCombiner.
    """
    rm = self.bl_scene.renderman
    sample_filter_names = []
    samplefilters_list = list()
    if rm.do_holdout_matte != "OFF" and not self.is_viewport_render:
        sf_node = self.rman.SGManager.RixSGShader("SampleFilter", "PxrShadowFilter", "rm_PxrShadowFilter_shadows")
        params = sf_node.params
        params.SetString("occludedAov", "occluded")
        params.SetString("unoccludedAov", "holdoutMatte")
        if rm.do_holdout_matte == "ALPHA":
            # write the holdout shadow into the alpha channel
            params.SetString("shadowAov", "a")
        else:
            params.SetString("shadowAov", "holdoutMatte")
        sample_filter_names.append("rm_PxrShadowFilter_shadows")
        samplefilters_list.append(sf_node)
    world = self.bl_scene.world
    for bl_sf_node in shadergraph_utils.find_samplefilter_nodes(world):
        if not bl_sf_node.is_active:
            continue
        sf_name = bl_sf_node.name
        rman_sf_node = self.rman.SGManager.RixSGShader("SampleFilter", bl_sf_node.bl_label, sf_name)
        rman_sg_node = RmanSgNode(self, rman_sf_node, "")
        property_utils.property_group_to_rixparams(bl_sf_node, rman_sg_node, rman_sf_node, ob=world)
        sample_filter_names.append(sf_name)
        samplefilters_list.append(rman_sf_node)
    if sel_chan_name:
        # viewport channel selection: copy the chosen AOV into the beauty
        sf_name = '__RMAN_VIEWPORT_CHANNEL_SELECT__'
        rman_sel_chan_node = self.rman.SGManager.RixSGShader("SampleFilter", "PxrCopyAOVSampleFilter", sf_name)
        params = rman_sel_chan_node.params
        params.SetString("readAov", sel_chan_name)
        sample_filter_names.append(sf_name)
        samplefilters_list.append(rman_sel_chan_node)
    if len(sample_filter_names) > 1:
        sf_name = "rman_samplefilter_combiner"
        sf_node = self.rman.SGManager.RixSGShader("SampleFilter", "PxrSampleFilterCombiner", sf_name)
        params = sf_node.params
        params.SetSampleFilterReferenceArray("filter", sample_filter_names, len(sample_filter_names))
        samplefilters_list.append(sf_node)
    self.sg_scene.SetSampleFilter(samplefilters_list)
def export_bake_displays(self):
    """Export display channels and per-channel bake displays.

    Baking requires one channel per display, so each color channel of a
    bake-enabled display gets its own Display node; displays are grouped
    per camera and non-main cameras are marked SetRenderable(2).
    """
    rm = self.bl_scene.renderman
    sg_displays = []
    displaychannels = []
    display_driver = None
    cams_to_dspys = dict()
    dspys_dict = display_utils.get_dspy_dict(self)
    for chan_name, chan_params in dspys_dict['channels'].items():
        chan_type = chan_params['channelType']['value']
        chan_source = chan_params['channelSource']['value']
        chan_remap_a = chan_params['remap_a']['value']
        chan_remap_b = chan_params['remap_b']['value']
        chan_remap_c = chan_params['remap_c']['value']
        chan_exposure = chan_params['exposure']['value']
        chan_filter = chan_params['filter']['value']
        chan_filterwidth = chan_params['filterwidth']['value']
        chan_statistics = chan_params['statistics']['value']
        displaychannel = self.rman.SGManager.RixSGDisplayChannel(chan_type, chan_name)
        if chan_source:
            if "lpe" in chan_source:
                # LPE sources need the channel type prepended
                displaychannel.params.SetString(self.rman.Tokens.Rix.k_source, '%s %s' % (chan_type, chan_source))
            else:
                displaychannel.params.SetString(self.rman.Tokens.Rix.k_source, chan_source)
        displaychannel.params.SetFloatArray("exposure", chan_exposure, 2)
        displaychannel.params.SetFloatArray("remap", [chan_remap_a, chan_remap_b, chan_remap_c], 3)
        if chan_filter != 'default':
            displaychannel.params.SetString("filter", chan_filter)
            displaychannel.params.SetFloatArray("filterwidth", chan_filterwidth, 2 )
        if chan_statistics and chan_statistics != 'none':
            displaychannel.params.SetString("statistics", chan_statistics)
        displaychannels.append(displaychannel)
    # baking requires we only do one channel per display. So, we create a new display
    # for each channel
    for dspy,dspy_params in dspys_dict['displays'].items():
        if not dspy_params['bake_mode']:
            continue
        display_driver = dspy_params['driverNode']
        channels = (dspy_params['params']['displayChannels'])
        # NOTE(review): this branch is unreachable — the 'continue' above already
        # skipped displays with bake_mode off, so the null-driver fallback below
        # is dead code; confirm whether the first check should be removed instead.
        if not dspy_params['bake_mode']:
            # if bake is off for this aov, just render to the null display driver
            dspy_file_name = dspy_params['filePath']
            display = self.rman.SGManager.RixSGShader("Display", "null", dspy_file_name)
            channels = ','.join(channels)
            display.params.SetString("mode", channels)
            cam_dspys = cams_to_dspys.get(self.main_camera, list())
            cam_dspys.append(display)
            cams_to_dspys[self.main_camera] = cam_dspys
        else:
            for chan in channels:
                chan_type = dspys_dict['channels'][chan]['channelType']['value']
                if chan_type != 'color':
                    # we can only bake color channels
                    continue
                dspy_file_name = dspy_params['filePath']
                if rm.rman_bake_illum_filename == 'BAKEFILEATTR':
                    tokens = os.path.splitext(dspy_file_name)
                    if tokens[1] == '':
                        # no extension: let the token expander append one
                        token_dict = {'aov': dspy}
                        dspy_file_name = string_utils.expand_string('%s.<ext>' % dspy_file_name,
                                                                    display=display_driver,
                                                                    token_dict=token_dict
                                                                    )
                else:
                    # insert the channel name before the file extension
                    tokens = os.path.splitext(dspy_file_name)
                    dspy_file_name = '%s.%s%s' % (tokens[0], chan, tokens[1])
                display = self.rman.SGManager.RixSGShader("Display", display_driver, dspy_file_name)
                dspydriver_params = dspy_params['dspyDriverParams']
                if dspydriver_params:
                    display.params.Inherit(dspydriver_params)
                display.params.SetString("mode", chan)
                if display_driver in ['deepexr', 'openexr']:
                    if rm.use_metadata:
                        display_utils.export_metadata(self.bl_scene, display.params)
                camera = dspy_params['camera']
                if camera is None:
                    cam_dspys = cams_to_dspys.get(self.main_camera, list())
                    cam_dspys.append(display)
                    cams_to_dspys[self.main_camera] = cam_dspys
                else:
                    #db_name = object_utils.get_db_name(camera)
                    if camera not in self.rman_cameras:
                        # unknown camera: fall back to the main camera
                        cam_dspys = cams_to_dspys.get(self.main_camera, list())
                        cam_dspys.append(display)
                        cams_to_dspys[self.main_camera] = cam_dspys
                    else:
                        cam_sg_node = self.rman_cameras.get(camera)
                        cam_dspys = cams_to_dspys.get(cam_sg_node, list())
                        cam_dspys.append(display)
                        cams_to_dspys[cam_sg_node] = cam_dspys
    for cam_sg_node,cam_dspys in cams_to_dspys.items():
        #cam = self.rman_cameras.get(db_name, None)
        if not cam_sg_node:
            continue
        if cam_sg_node != self.main_camera:
            cam_sg_node.sg_camera_node.SetRenderable(2)
        cam_sg_node.sg_camera_node.SetDisplay(cam_dspys)
    self.sg_scene.SetDisplayChannel(displaychannels)
def export_displays(self):
    """Export display channels and displays for a normal (non-bake) render.

    Displays are grouped per camera; 'it' displays get their socket
    parameters, and EXR displays optionally get metadata / asrgba.
    """
    rm = self.bl_scene.renderman
    sg_displays = []
    displaychannels = []
    display_driver = None
    cams_to_dspys = dict()
    dspys_dict = display_utils.get_dspy_dict(self)
    for chan_name, chan_params in dspys_dict['channels'].items():
        chan_type = chan_params['channelType']['value']
        chan_source = chan_params['channelSource']['value']
        chan_remap_a = chan_params['remap_a']['value']
        chan_remap_b = chan_params['remap_b']['value']
        chan_remap_c = chan_params['remap_c']['value']
        chan_exposure = chan_params['exposure']['value']
        chan_filter = chan_params['filter']['value']
        chan_filterwidth = chan_params['filterwidth']['value']
        chan_statistics = chan_params['statistics']['value']
        chan_shadowthreshold = chan_params['shadowthreshold']['value']
        if chan_type == 'float[2]':
            # translate to the Rix float2 token
            chan_type = self.rman.Tokens.Rix.k_float2
        displaychannel = self.rman.SGManager.RixSGDisplayChannel(chan_type, chan_name)
        if chan_source and chan_source != '':
            if "lpe" in chan_source:
                # LPE sources need the channel type prepended
                displaychannel.params.SetString(self.rman.Tokens.Rix.k_source, '%s %s' % (chan_type, chan_source))
            else:
                displaychannel.params.SetString(self.rman.Tokens.Rix.k_source, '%s' % (chan_source))
        displaychannel.params.SetFloatArray("exposure", chan_exposure, 2)
        displaychannel.params.SetFloatArray("remap", [chan_remap_a, chan_remap_b, chan_remap_c], 3)
        displaychannel.params.SetFloat("shadowthreshold", chan_shadowthreshold)
        if chan_filter != 'default':
            displaychannel.params.SetString("filter", chan_filter)
            displaychannel.params.SetFloatArray("filterwidth", chan_filterwidth, 2 )
        if chan_statistics and chan_statistics != 'none':
            displaychannel.params.SetString("statistics", chan_statistics)
        displaychannels.append(displaychannel)
    for dspy,dspy_params in dspys_dict['displays'].items():
        display_driver = dspy_params['driverNode']
        dspy_file_name = dspy_params['filePath']
        display = self.rman.SGManager.RixSGShader("Display", display_driver, dspy_file_name)
        channels = ','.join(dspy_params['params']['displayChannels'])
        dspydriver_params = dspy_params['dspyDriverParams']
        if dspydriver_params:
            display.params.Inherit(dspydriver_params)
        display.params.SetString("mode", channels)
        if display_driver == "it":
            # 'it' needs the socket port and session notes in dspyParams
            dspy_info = display_utils.make_dspy_info(self.bl_scene)
            port = self.rman_render.it_port
            dspy_callback = "dspyRender"
            if self.is_interactive:
                dspy_callback = "dspyIPR"
            display.params.SetString("dspyParams",
                                     "%s -port %d -crop 1 0 1 0 -notes %s" % (dspy_callback, port, dspy_info))
        cam_sg_node = self.main_camera
        camera = dspy_params['camera']
        if camera and camera in self.rman_cameras:
            cam_sg_node = self.rman_cameras.get(camera)
        if display_driver in ['deepexr', 'openexr']:
            if rm.use_metadata:
                display_utils.export_metadata(self.bl_scene, display.params, camera_name=cam_sg_node.db_name)
            if not dspy_params['denoise']:
                display.params.SetInteger("asrgba", 1)
        cam_dspys = cams_to_dspys.get(cam_sg_node, list())
        cam_dspys.append(display)
        cams_to_dspys[cam_sg_node] = cam_dspys
    for cam_sg_node,cam_dspys in cams_to_dspys.items():
        #cam = self.rman_cameras.get(db_name, None)
        if not cam_sg_node:
            continue
        if cam_sg_node != self.main_camera:
            cam_sg_node.sg_camera_node.SetRenderable(2)
        cam_sg_node.sg_camera_node.SetDisplay(cam_dspys)
    self.sg_scene.SetDisplayChannel(displaychannels)
def export_stats(self):
    """Push the integrator name and max sample count to the stats manager."""
    stats_mgr = self.rman_render.stats_mgr
    rm = self.bl_scene.renderman
    integrator_node = shadergraph_utils.find_integrator_node(self.bl_scene.world)
    # fall back to the default integrator when the world has none
    stats_mgr._integrator = integrator_node.bl_label if integrator_node else 'PxrPathTracer'
    #stats_mgr._minSamples = rm.hider_minSamples
    stats_mgr._maxSamples = rm.hider_maxSamples
def export_viewport_stats(self, integrator=''):
    """Push IPR integrator and sampling settings to the stats manager.

    The world's integrator node wins; otherwise the passed-in name is
    used, defaulting to PxrPathTracer when empty.
    """
    stats_mgr = self.rman_render.stats_mgr
    rm = self.bl_scene.renderman
    integrator_node = shadergraph_utils.find_integrator_node(self.bl_scene.world)
    if integrator_node:
        integrator = integrator_node.bl_label
    elif integrator == '':
        integrator = 'PxrPathTracer'
    stats_mgr._integrator = integrator
    #stats_mgr._minSamples = rm.ipr_hider_minSamples
    stats_mgr._maxSamples = rm.ipr_hider_maxSamples
    stats_mgr._decidither = rm.hider_decidither
    stats_mgr._res_mult = int(self.viewport_render_res_mult * 100)
|
prman-pixar/RenderManForBlender | rman_ui/rman_ui_volume_panels.py | from .rman_ui_base import CollectionPanel
from ..rfb_utils.draw_utils import _draw_ui_from_rman_config
from ..rfb_utils import object_utils
from ..rfb_logger import rfb_log
from bpy.types import Panel
import bpy
# Data-tab panel exposing RenderMan's OpenVDB settings for volume objects.
class VOLUME_PT_renderman_openvdb_attributes(CollectionPanel, Panel):
    bl_context = "data"
    bl_label = "OpenVDB"

    @classmethod
    def poll(cls, context):
        # Only show for volume data blocks, deferring to the base poll.
        if context.volume:
            return CollectionPanel.poll(context)
        return False

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        rm = context.volume.renderman
        layout.prop(rm, 'openvdb_velocity_grid_name')
        # draw the remaining properties from the rman config definition
        _draw_ui_from_rman_config('rman_properties_volume', 'VOLUME_PT_renderman_openvdb_attributes', context, layout, rm)
# Classes registered/unregistered by this module.
classes = [
    VOLUME_PT_renderman_openvdb_attributes,
]


def register():
    """Register this module's classes with Blender."""
    for klass in classes:
        bpy.utils.register_class(klass)


def unregister():
    """Unregister this module's classes, tolerating already-removed ones."""
    for klass in classes:
        try:
            bpy.utils.unregister_class(klass)
        except RuntimeError:
            rfb_log().debug('Could not unregister class: %s' % str(klass))
prman-pixar/RenderManForBlender | rman_operators/rman_operators_editors/rman_operators_editors_stylized.py | from bpy.props import (StringProperty, BoolProperty, EnumProperty, IntProperty)
from ...rfb_utils.draw_utils import draw_node_properties_recursive
from ...rfb_utils import shadergraph_utils
from ...rfb_utils import object_utils
from ...rfb_logger import rfb_log
from ... import rfb_icons
from ...rman_operators.rman_operators_collections import return_empty_list
from ...rman_config import __RFB_CONFIG_DICT__ as rfb_config
import bpy
import re
class PRMAN_OT_Renderman_Open_Stylized_Help(bpy.types.Operator):
    # Pop-up dialog explaining how to use RenderMan Stylized Looks.
    bl_idname = "renderman.rman_stylized_help"
    bl_label = "Stylized Help"
    bl_description = "Get help on how to use RenderMan Stylzied Looks"

    def execute(self, context):
        # Nothing to do; the dialog is informational only.
        return{'FINISHED'}

    def draw(self, context):
        """Draw the static help text, an illustration, and a docs link."""
        layout = self.layout
        box = layout.box()
        box.scale_y = 0.4
        rman_icon = rfb_icons.get_node_icon('PxrStylizedControl')
        box.label(text="RenderMan Stylized Looks HOWTO", icon_value = rman_icon.icon_id)
        rman_icon = rfb_icons.get_icon('help_stylized_1')
        box.template_icon(rman_icon.icon_id, scale=10.0)
        box.label(text="")
        box.label(text="To start using RenderMan Stylized Looks, click the Enable Stylized Looks.")
        box.label(text="")
        box.label(text="Stylized looks requires BOTH a stylized pattern node")
        box.label(text="be connected in an object's shading material network")
        box.label(text="and one of the stylized display filters be present in the scene.")
        box.label(text="")
        box.label(text="In the RenderMan Stylized Editor, the Patterns tab allows you to")
        box.label(text="search for an object in the scene and attach a PxrStylizedControl pattern.")
        box.label(text="You can use the drop down list or do a filter search to select the object you want to stylized.")
        box.label(text="If no material is present, a PxrSurface material will automatically be created for you.")
        box.label(text="The stylized pattern allows for per-object control.")
        box.label(text="")
        box.label(text="The Filters tab allows you to add one of the stylized display filters.")
        box.label(text="The filters can be turned on and off, individually.")
        box.label(text="As mentioned in earlier, both the patterns and the filters need to be present.")
        box.label(text="So you need to add at least one filter for the stylized looks to work.")
        rman_help = rfb_icons.get_icon("rman_help")
        split = layout.split(factor=0.98)
        row = split.row()
        col = row.column()
        col = row.column()
        col.label(text="")
        row.operator("wm.url_open", text="RenderMan Docs",
                     icon_value=rman_help.icon_id).url = "https://rmanwiki.pixar.com/display/RFB24"

    def invoke(self, context, event):
        # Show as a fixed-width props dialog.
        wm = context.window_manager
        return wm.invoke_props_dialog(self, width=500)
class PRMAN_OT_Renderman_Open_Stylized_Editor(bpy.types.Operator):
    # Modal editor for RenderMan Stylized Looks: attach stylized pattern
    # nodes to objects (Patterns tab) and manage stylized display filters
    # in the world (Filters tab).
    bl_idname = "scene.rman_open_stylized_editor"
    bl_label = "RenderMan Stylized Editor"

    @classmethod
    def poll(cls, context):
        # Only available when RenderMan is the active render engine.
        rd = context.scene.render
        return rd.engine in {'PRMAN_RENDER'}

    def updated_object_selected_name(self, context):
        """update callback for selected_obj_name: make the chosen object
        the active, selected object in the view layer."""
        ob = context.scene.objects.get(self.selected_obj_name, None)
        if not ob:
            return
        if context.view_layer.objects.active:
            context.view_layer.objects.active.select_set(False)
        ob.select_set(True)
        context.view_layer.objects.active = ob

    def obj_list_items(self, context):
        """items callback for selected_obj_name: enumerate objects that do
        not already carry a stylized pattern node, optionally filtered by
        the regex in object_search_filter."""
        pattern = re.compile(self.object_search_filter)
        scene = context.scene
        rm = scene.renderman
        if self.do_object_filter and self.object_search_filter == '':
            return return_empty_list(label='No Objects Found')
        items = []
        for ob in context.scene.objects:
            # lights and cameras cannot take stylized patterns
            if ob.type in ['LIGHT', 'CAMERA']:
                continue
            mat = object_utils.get_active_material(ob)
            if not mat:
                items.append((ob.name, ob.name, ''))
                continue
            if not shadergraph_utils.is_renderman_nodetree(mat):
                items.append((ob.name, ob.name, ''))
                continue
            if self.do_object_filter and not re.match(pattern, ob.name):
                continue
            if not shadergraph_utils.has_stylized_pattern_node(ob):
                items.append((ob.name, ob.name, ''))
        if not items:
            return return_empty_list(label='No Objects Found')
        elif self.do_object_filter:
            items.insert(0, ('0', 'Results (%d)' % len(items), '', '', 0))
        else:
            items.insert(0, ('0', 'Select Object', '', '', 0))
        return items

    def update_do_object_filter(self, context):
        # Reset the object selection whenever filter mode is toggled.
        self.selected_obj_name = '0'

    do_object_filter: BoolProperty(name="Object Filter",
                                   description="Search and add multiple objects",
                                   default=False,
                                   update=update_do_object_filter)
    object_search_filter: StringProperty(name="Object Filter Search", default="")
    selected_obj_name: EnumProperty(name="", items=obj_list_items, update=updated_object_selected_name)

    def current_filters(self, context):
        """items callback: all stylized display filter nodes in the world."""
        items = []
        scene = context.scene
        world = scene.world
        nt = world.node_tree
        nodes = shadergraph_utils.find_all_stylized_filters(world)
        for node in nodes:
            items.append((node.name, node.name, ""))
        if len(items) < 1:
            # enum items may not be empty; use a placeholder entry
            items.append(('0', '', '', '', 0))
        return items

    stylized_filter: EnumProperty(
        name="",
        items=current_filters
    )

    stylized_tabs: EnumProperty(
        name="",
        items=[
            ('patterns', 'Patterns', 'Add or edit stylized patterns attached to objects in the scene'),
            ('filters', 'Filters', 'Add or edit stylized display filters in the scene'),
        ]
    )

    def get_stylized_objects(self, context):
        """items callback: objects that already have a stylized pattern node."""
        items = []
        scene = context.scene
        for ob in scene.objects:
            node = shadergraph_utils.has_stylized_pattern_node(ob)
            if node:
                items.append((ob.name, ob.name, ''))
        if len(items) < 1:
            items.append(('0', '', '', '', 0))
        return items

    stylized_objects: EnumProperty(
        name="",
        items=get_stylized_objects
    )

    def execute(self, context):
        # All edits happen live through the draw callbacks; nothing to commit.
        return{'FINISHED'}

    def draw_patterns_tab(self, context):
        """Draw the Patterns tab: pick an object, attach a stylized
        pattern, and edit patterns on already-stylized objects."""
        scene = context.scene
        rm = scene.renderman
        selected_objects = context.selected_objects
        layout = self.layout
        row = layout.row()
        row.separator()
        row.prop(self, 'do_object_filter', text='', icon='FILTER', icon_only=True)
        if not self.do_object_filter:
            row.prop(self, 'selected_obj_name', text='')
            col = row.column()
            if self.selected_obj_name == '0' or self.selected_obj_name == '':
                pass
            else:
                col.context_pointer_set('op_ptr', self)
                col.context_pointer_set('selected_obj', scene.objects[self.selected_obj_name])
                col.operator_menu_enum('node.rman_attach_stylized_pattern', 'stylized_pattern')
        else:
            row.prop(self, 'object_search_filter', text='', icon='VIEWZOOM')
            row = layout.row()
            row.prop(self, 'selected_obj_name')
            col = row.column()
            if self.selected_obj_name == '0' or self.selected_obj_name == '':
                pass
            else:
                col.context_pointer_set('op_ptr', self)
                col.context_pointer_set('selected_obj', scene.objects[self.selected_obj_name])
                col.operator_menu_enum('node.rman_attach_stylized_pattern', 'stylized_pattern')
        if self.properties.stylized_objects != '0':
            layout.separator()
            row = layout.row(align=True)
            col = row.column()
            col.label(text='Stylized Objects')
            row = layout.row(align=True)
            col = row.column()
            col.prop(self, 'stylized_objects')
            ob = scene.objects.get(self.properties.stylized_objects, None)
            node = shadergraph_utils.has_stylized_pattern_node(ob)
            mat = object_utils.get_active_material(ob)
            col.separator()
            col.label(text=node.name)
            col.separator()
            draw_node_properties_recursive(layout, context, mat.node_tree, node, level=1)

    def draw_filters_tab(self, context):
        """Draw the Filters tab: add stylized display filters and edit
        the currently selected one."""
        scene = context.scene
        world = scene.world
        nt = world.node_tree
        layout = self.layout
        row = layout.row(align=True)
        col = row.column()
        col.context_pointer_set('op_ptr', self)
        col.operator_menu_enum('node.rman_add_stylized_filter', 'filter_name')
        layout.separator()
        output = shadergraph_utils.find_node(world, 'RendermanDisplayfiltersOutputNode')
        if not output:
            row = layout.row()
            row.label(text="No Stylized Filters")
            return
        layout.separator()
        row = layout.row()
        row.label(text="Scene Filters")
        row = layout.row()
        layout.prop(self, 'stylized_filter')
        selected_stylized_node = None
        if self.properties.stylized_filter != '':
            nodes = shadergraph_utils.find_all_stylized_filters(world)
            for node in nodes:
                if node.name == self.properties.stylized_filter:
                    selected_stylized_node = node
                    break
        if selected_stylized_node:
            rman_icon = rfb_icons.get_displayfilter_icon(node.bl_label)
            layout.prop(selected_stylized_node, "is_active")
            layout.prop(node, 'name')
            if selected_stylized_node.is_active:
                draw_node_properties_recursive(layout, context, nt, selected_stylized_node, level=1)

    def draw(self, context):
        """Top-level draw: enable toggle, help button, and the active tab."""
        layout = self.layout
        scene = context.scene
        rm = scene.renderman
        split = layout.split()
        row = split.row()
        col = row.column()
        col.prop(rm, 'render_rman_stylized', text='Enable Stylized Looks')
        col = row.column()
        icon = rfb_icons.get_icon('rman_help')
        col.operator("renderman.rman_stylized_help", text="", icon_value=icon.icon_id)
        if not rm.render_rman_stylized:
            return
        row = layout.row(align=True)
        row.prop_tabs_enum(self, 'stylized_tabs', icon_only=False)
        if self.properties.stylized_tabs == "patterns":
            self.draw_patterns_tab(context)
        else:
            self.draw_filters_tab(context)

    def cancel(self, context):
        # Re-open the dialog when it was dismissed by a left click,
        # keeping the editor persistent while the user works.
        if self.event and self.event.type == 'LEFTMOUSE':
            bpy.ops.scene.rman_open_stylized_editor('INVOKE_DEFAULT')

    def __init__(self):
        self.event = None

    def invoke(self, context, event):
        # Width comes from the editor preferences in the rman config.
        wm = context.window_manager
        width = rfb_config['editor_preferences']['stylizedlooks_editor']['width']
        self.event = event
        return wm.invoke_props_dialog(self, width=width)
# Classes registered/unregistered by this module.
classes = [
    PRMAN_OT_Renderman_Open_Stylized_Help,
    PRMAN_OT_Renderman_Open_Stylized_Editor,
]


def register():
    """Register this module's operators with Blender."""
    for klass in classes:
        bpy.utils.register_class(klass)


def unregister():
    """Unregister this module's operators, tolerating already-removed ones."""
    for klass in classes:
        try:
            bpy.utils.unregister_class(klass)
        except RuntimeError:
            rfb_log().debug('Could not unregister class: %s' % str(klass))
prman-pixar/RenderManForBlender | rfb_utils/rman_socket_utils.py | from ..rman_constants import RFB_ARRAYS_MAX_LEN, __RMAN_SOCKET_MAP__
from .shadergraph_utils import has_lobe_enable_props
def update_inputs(node):
    """Sync input-socket visibility with each page's 'enable*' toggle.

    Mesh lights are skipped entirely. For every 'page' property, the first
    member whose name starts with 'enable' determines whether the page's
    sockets are shown or hidden.
    """
    if node.bl_idname == 'PxrMeshLightLightNode':
        return
    for prop_name in node.prop_names:
        page_name = prop_name
        if node.prop_meta[page_name]['renderman_type'] == 'page':
            # NOTE(review): the inner loop deliberately rebinds prop_name to the
            # page members and stops at the first 'enable*' entry — confirm that
            # pages never carry more than one enable toggle.
            for prop_name in getattr(node, page_name):
                if prop_name.startswith('enable'):
                    recursive_enable_inputs(node, getattr(
                        node, page_name), getattr(node, prop_name))
                    break
def recursive_enable_inputs(node, prop_names, enable=True):
    """Show (enable=True) or hide the input sockets named in prop_names.

    Entries whose metadata marks them as a 'page' are descended into
    recursively; names without a matching input socket are ignored.
    """
    for pname in prop_names:
        is_page = type(pname) == str and node.prop_meta[pname]['renderman_type'] == 'page'
        if is_page:
            recursive_enable_inputs(node, getattr(node, pname), enable)
        elif hasattr(node, 'inputs') and pname in node.inputs:
            node.inputs[pname].hide = not enable
def find_enable_param(params):
    """Return the first name in *params* starting with 'enable', else None."""
    return next((p for p in params if p.startswith('enable')), None)
def node_add_inputs(node, node_name, prop_names, first_level=True, label_prefix='', remove=False):
    ''' Add (or remove) input sockets on this ShadingNode for the given
    property names.

    'page' properties recurse into their members (lobe pages honor their
    'enable*' toggle), 'array' properties add sockets for the first
    `<name>_arraylen` members, and non-connectable parameters are skipped.

    Args:
        node: the shading node being populated
        node_name: the node's type name (kept for call compatibility)
        prop_names: iterable of property names to process
        first_level: True on the outermost call; enables lobe handling
        label_prefix: prefix applied to socket labels of page members
        remove: when True, remove matching sockets instead of adding them
    '''
    for name in prop_names:
        meta = node.prop_meta[name]
        param_type = meta['renderman_type']
        # BUG FIX: meta is a dict, so getattr(meta, 'renderman_array_type', ...)
        # could never find the key and always fell through to param_type.
        # Use dict.get so array element types resolve to their socket type.
        param_type = meta.get('renderman_array_type', param_type)
        if name in node.inputs.keys() and remove:
            node.inputs.remove(node.inputs[name])
            continue
        elif name in node.inputs.keys():
            # socket already exists
            continue
        # if this is a page recursively add inputs
        if 'renderman_type' in meta and meta['renderman_type'] == 'page':
            if first_level and has_lobe_enable_props(node) and name != 'Globals':
                # lobe pages: only expose sockets when the lobe is enabled
                enable_param = find_enable_param(getattr(node, name))
                if enable_param and getattr(node, enable_param):
                    node_add_inputs(node, node_name, getattr(node, name),
                                    label_prefix=name + ' ',
                                    first_level=False)
                else:
                    node_add_inputs(node, node_name, getattr(node, name),
                                    label_prefix=name + ' ',
                                    first_level=False, remove=True)
                continue
            else:
                node_add_inputs(node, node_name, getattr(node, name),
                                first_level=first_level,
                                label_prefix=label_prefix, remove=remove)
                continue
        elif 'renderman_type' in meta and meta['renderman_type'] == 'array':
            # only the first `arraylen` members get sockets
            arraylen = getattr(node, '%s_arraylen' % name)
            sub_prop_names = getattr(node, name)
            sub_prop_names = sub_prop_names[:arraylen]
            node_add_inputs(node, node_name, sub_prop_names,
                            label_prefix='',
                            first_level=False, remove=False)
            continue
        if remove:
            continue
        # if this is not connectable don't add socket
        if param_type not in __RMAN_SOCKET_MAP__:
            continue
        if '__noconnection' in meta and meta['__noconnection']:
            continue
        param_name = name
        param_label = label_prefix + meta.get('label', param_name)
        socket = node.inputs.new(
            __RMAN_SOCKET_MAP__[param_type], param_name, identifier=param_label)
        socket.link_limit = 1
        #socket.default_value = meta['default_value']
        if param_type in ['struct', 'normal', 'vector', 'vstruct', 'void']:
            # these socket types carry no editable value
            socket.hide_value = True
        if param_type == 'struct':
            socket.struct_name = meta.get('struct_name', 'Manifold')
    update_inputs(node)
def node_add_outputs(node):
    """Create an output socket for every connectable entry in node.output_meta.

    vstruct members are skipped; vstruct outputs themselves are mapped to
    the 'vstruct' socket type, and struct outputs carry their struct name.
    """
    for out_name, out_meta in node.output_meta.items():
        sock_type = out_meta['renderman_type']
        if sock_type not in __RMAN_SOCKET_MAP__ or 'vstructmember' in out_meta:
            continue
        if out_meta.get('vstruct', False):
            sock_type = 'vstruct'
        new_socket = node.outputs.new(__RMAN_SOCKET_MAP__[sock_type], out_name)
        new_socket.renderman_type = sock_type
        if sock_type == 'struct':
            new_socket.struct_name = out_meta.get('struct_name', 'Manifold')
prman-pixar/RenderManForBlender | rman_operators/rman_operators_utils.py | <gh_stars>100-1000
from ..rfb_logger import rfb_log
from ..rfb_utils import shadergraph_utils
from ..rfb_utils import string_utils
from ..rfb_utils import texture_utils
from ..rfb_utils import filepath_utils
from bpy.types import Operator
from bpy.props import StringProperty
import os
import zipfile
import bpy
import shutil
class PRMAN_OT_Renderman_Package(Operator):
    """An operator to create a zip archive of the current scene."""
    bl_idname = "renderman.scene_package"
    bl_label = "Package Scene"
    bl_description = "Package your scene including textures into a zip file."
    bl_options = {'INTERNAL'}

    directory: bpy.props.StringProperty(subtype='FILE_PATH')
    filepath: bpy.props.StringProperty(
        subtype="FILE_PATH")
    filename: bpy.props.StringProperty(
        subtype="FILE_NAME",
        default="")
    filter_glob: bpy.props.StringProperty(
        default="*.zip",
        options={'HIDDEN'},
    )

    @classmethod
    def poll(cls, context):
        return context.engine == "PRMAN_RENDER"

    def execute(self, context):
        """Collect textures, OSL shaders, string-property assets and
        OpenVDB volumes into staging dirs, rewrite the node paths to
        <blend_dir>-relative tokens, zip everything with a copy of the
        .blend, then clean up and revert the scene to undo the rewrites.
        """
        if not os.access(self.directory, os.W_OK):
            self.report({"ERROR"}, "Directory is not writable")
            return {'FINISHED'}
        if not bpy.data.is_saved:
            self.report({"ERROR"}, "Scene not saved yet.")
            return {'FINISHED'}
        z = zipfile.ZipFile(self.filepath, mode='w')
        bl_scene_file = bpy.data.filepath
        remove_files = list()
        remove_dirs = list()
        # textures
        texture_dir = os.path.join(self.directory, 'textures')
        os.mkdir(os.path.join(texture_dir))
        remove_dirs.append(texture_dir)
        # assets
        assets_dir = os.path.join(self.directory, 'assets')
        os.mkdir(os.path.join(assets_dir))
        remove_dirs.append(assets_dir)
        # osl shaders
        shaders_dir = os.path.join(self.directory, 'shaders')
        os.mkdir(os.path.join(shaders_dir))
        remove_dirs.append(shaders_dir)
        # copy both the source image and its converted .tex into the archive
        for item in context.scene.rman_txmgr_list:
            txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_id(item.nodeID)
            if not txfile:
                continue
            for fpath, txitem in txfile.tex_dict.items():
                bfile = os.path.basename(fpath)
                shutil.copyfile(fpath, os.path.join(texture_dir, bfile))
                bfile = os.path.basename(txitem.outfile)
                diskpath = os.path.join(texture_dir, bfile)
                shutil.copyfile(txitem.outfile, diskpath)
                z.write(diskpath, arcname=os.path.join('textures', bfile))
                remove_files.append(diskpath)
        for node in shadergraph_utils.get_all_shading_nodes():
            # external OSL shaders get copied and re-pointed to <blend_dir>/shaders
            if node.bl_label == 'PxrOSL' and getattr(node, "codetypeswitch") == "EXT":
                osl_path = string_utils.expand_string(getattr(node, 'shadercode'))
                osl_path = filepath_utils.get_real_path(osl_path)
                FileName = os.path.basename(osl_path)
                FileNameNoEXT = os.path.splitext(FileName)[0]
                diskpath = os.path.join(shaders_dir, FileName)
                shutil.copyfile(osl_path, diskpath)
                setattr(node, 'shadercode', os.path.join('<blend_dir>', 'shaders', FileName))
                z.write(diskpath, arcname=os.path.join('shaders', FileName))
                remove_files.append(diskpath)
            # rewrite string properties: texture paths point at <blend_dir>/textures,
            # other existing file paths are copied into <blend_dir>/assets
            for prop_name, meta in node.prop_meta.items():
                param_type = meta['renderman_type']
                if param_type != 'string':
                    continue
                if shadergraph_utils.is_texture_property(prop_name, meta):
                    prop = getattr(node, prop_name)
                    if prop != '':
                        prop = os.path.basename(prop)
                        setattr(node, prop_name, os.path.join('<blend_dir>', 'textures', prop))
                else:
                    prop = getattr(node, prop_name)
                    val = string_utils.expand_string(prop)
                    if os.path.exists(val):
                        bfile = os.path.basename(val)
                        diskpath = os.path.join(assets_dir, bfile)
                        shutil.copyfile(val, diskpath)
                        setattr(node, prop_name, os.path.join('<blend_dir>', 'assets', bfile))
                        z.write(diskpath, arcname=os.path.join('assets', bfile))
                        remove_files.append(diskpath)
        # volumes
        for db in bpy.data.volumes:
            openvdb_file = filepath_utils.get_real_path(db.filepath)
            bfile = os.path.basename(openvdb_file)
            diskpath = os.path.join(assets_dir, bfile)
            shutil.copyfile(openvdb_file, diskpath)
            setattr(db, 'filepath', '//./assets/%s' % bfile)
            z.write(diskpath, arcname=os.path.join('assets', bfile))
            remove_files.append(diskpath)
        bl_filepath = os.path.dirname(bl_scene_file)
        bl_filename = os.path.basename(bl_scene_file)
        bl_filepath = os.path.join(self.directory, bl_filename)
        # save a copy of the .blend (with the rewritten paths) into the archive
        bpy.ops.wm.save_as_mainfile(filepath=bl_filepath, copy=True)
        remove_files.append(bl_filepath)
        z.write(bl_filepath, arcname=bl_filename)
        z.close()
        for f in remove_files:
            os.remove(f)
        for d in remove_dirs:
            os.removedirs(d)
        # revert to undo the path rewrites made above
        bpy.ops.wm.revert_mainfile()
        return {'FINISHED'}

    def invoke(self, context, event=None):
        # Suggest "<scene name>.zip" and open the file browser.
        bl_scene_file = bpy.data.filepath
        bl_filename = os.path.splitext(os.path.basename(bl_scene_file))[0]
        self.properties.filename = '%s.zip' % bl_filename
        context.window_manager.fileselect_add(self)
        return{'RUNNING_MODAL'}
# Operator classes this module registers with Blender.
classes = [
    PRMAN_OT_Renderman_Package
]
def register():
    """Register this module's operator classes with Blender."""
    for operator_cls in classes:
        bpy.utils.register_class(operator_cls)
def unregister():
    """Unregister this module's operator classes from Blender.

    Failures are logged and skipped so one bad class does not abort
    add-on shutdown. (Removed a redundant trailing ``pass``.)
    """
    for cls in classes:
        try:
            bpy.utils.unregister_class(cls)
        except RuntimeError:
            # Class may already be unregistered; log and continue.
            rfb_log().debug('Could not unregister class: %s' % str(cls))
prman-pixar/RenderManForBlender | rfb_utils/osl_utils.py | <reponame>prman-pixar/RenderManForBlender<gh_stars>100-1000
from ..rfb_logger import rfb_log
import bpy
import re
import os
def readOSO(filePath):
    """Parse a compiled OSL shader (.oso) and collect its parameter metadata.

    Args:
        filePath (str) - path to the .oso file to inspect

    Returns:
        (tuple) - (list of parameter names, dict mapping each parameter name
        to its property-meta dict; the shader's name is stored under the
        "shader" key of the same dict)
    """
    import oslquery as oslq
    oinfo = oslq.OslQuery()
    oinfo.open(filePath)
    shader_meta = {}
    prop_names = []
    shader_meta["shader"] = oinfo.shadername()
    for i in range(oinfo.nparams()):
        pdict = oinfo.getparam(i)
        name = pdict['name']
        # renamed from 'type' to avoid shadowing the builtin;
        # structs are flattened to a generic 'struct' type
        param_type = 'struct' if pdict['isstruct'] else pdict['type']
        prop_names.append(name)
        IO = "in"
        if pdict['isoutput']:
            IO = "out"
        prop_meta = {"type": param_type, "IO": IO}
        # default value (structs have none)
        if not pdict['isstruct']:
            prop_meta['default'] = pdict['default']
            if prop_meta['type'] == 'float':
                # normalize float formatting (e.g. strip trailing zeros)
                prop_meta['default'] = float('%g' % prop_meta['default'])
        # per-parameter metadata entries; each entry is itself a param dict
        # whose value lives under 'default'
        for mdict in pdict.get('metadata', []):
            if mdict['name'] == 'tag' and mdict['default'] == 'vstruct':
                prop_meta['type'] = 'vstruct'
            elif mdict['name'] == 'vstructmember':
                prop_meta['vstructmember'] = mdict['default']
            elif mdict['name'] == 'vstructConditionalExpr':
                # NOTE(review): this replace() looks like a no-op (space -> space);
                # possibly meant to collapse a different whitespace char -- confirm.
                prop_meta['vstructConditionalExpr'] = mdict['default'].replace(' ', ' ')
            elif mdict['name'] == 'match':
                prop_meta['match'] = mdict['default']
            elif mdict['name'] == 'lockgeom':
                # bug fix: metadata entries store their value under 'default';
                # mdict has no 'lockgeom' key, so the old lookup raised KeyError
                prop_meta['lockgeom'] = mdict['default']
        shader_meta[name] = prop_meta
    return prop_names, shader_meta
prman-pixar/RenderManForBlender | rman_operators/rman_operators_printer.py | from bpy.types import Operator
from bpy.props import StringProperty, IntProperty, CollectionProperty, EnumProperty, BoolProperty
import bpy
class PRMAN_OT_Renderman_printer(Operator):
    """Pop-up operator used to display a message to the user."""
    bl_idname = "renderman.printer"
    bl_label = "RenderMan Message"
    bl_options = {'INTERNAL'}
    message: StringProperty()
    level: EnumProperty(
        name="level",
        items=[
            ('INFO', 'INFO', ''),
            ('ERROR', 'ERROR', ''),
            ('DEBUG', 'DEBUG', ''),
            ('WARNING', 'WARNING', '')
        ]
    )

    @classmethod
    def poll(cls, context):
        # Only usable where a window manager is available.
        return hasattr(context, 'window_manager')

    def draw(self, context):
        # Every level except INFO is shown with the error icon.
        icon = 'INFO' if self.level == 'INFO' else 'ERROR'
        column = self.layout.column()
        column.label(text='%s' % self.message, icon=icon)

    def execute(self, context):
        return {'FINISHED'}

    def invoke(self, context, event):
        return context.window_manager.invoke_props_dialog(self)
# Operator classes this module registers with Blender.
classes = [
    PRMAN_OT_Renderman_printer
]
def register():
    """Register this module's operator classes with Blender."""
    for operator_cls in classes:
        bpy.utils.register_class(operator_cls)
def unregister():
    """Unregister this module's operator classes from Blender.

    Failures are logged and skipped so one bad class does not abort
    add-on shutdown.
    """
    for cls in classes:
        try:
            bpy.utils.unregister_class(cls)
        except RuntimeError:
            # bug fix: rfb_log is not imported at the top of this module, so
            # this branch previously raised NameError whenever it ran; import
            # it locally here (and drop the redundant 'pass').
            from ..rfb_logger import rfb_log
            rfb_log().debug('Could not unregister class: %s' % str(cls))
prman-pixar/RenderManForBlender | rman_translators/rman_hair_translator.py | from .rman_translator import RmanTranslator
from ..rfb_utils import object_utils
from ..rfb_utils import scenegraph_utils
from ..rman_sg_nodes.rman_sg_hair import RmanSgHair
from mathutils import Vector
import math
import bpy
import numpy as np
class RmanHairTranslator(RmanTranslator):
    """Translator that exports Blender hair particle systems as RenderMan
    catmull-rom curve primitives under a scene-graph group node."""

    def __init__(self, rman_scene):
        super().__init__(rman_scene)
        # Blender object type this translator handles
        self.bl_type = 'HAIR'

    def export(self, ob, psys, db_name):
        """Create and return the RmanSgHair group that will hold this
        particle system's curve nodes."""
        sg_node = self.rman_scene.sg_scene.CreateGroup(db_name)
        rman_sg_hair = RmanSgHair(self.rman_scene, sg_node, db_name)
        return rman_sg_hair

    def clear_children(self, ob, psys, rman_sg_hair):
        """Detach and delete every curve node previously exported under this
        hair group, and reset the cached curve list."""
        if rman_sg_hair.sg_node:
            for c in [ rman_sg_hair.sg_node.GetChild(i) for i in range(0, rman_sg_hair.sg_node.GetNumChildren())]:
                rman_sg_hair.sg_node.RemoveChild(c)
                self.rman_scene.sg_scene.DeleteDagNode(c)
            rman_sg_hair.sg_curves_list.clear()

    def export_deform_sample(self, rman_sg_hair, ob, psys, time_sample):
        """Write a motion-blur deformation sample (point positions only) onto
        each previously created curve node at the given time sample."""
        curves = self._get_strands_(ob, psys)
        for i, (vertsArray, points, widths, scalpST) in enumerate(curves):
            curves_sg = rman_sg_hair.sg_curves_list[i]
            if not curves_sg:
                continue
            primvar = curves_sg.GetPrimVars()
            primvar.SetPointDetail(self.rman_scene.rman.Tokens.Rix.k_P, points, "vertex", time_sample)
            curves_sg.SetPrimVars(primvar)

    def update(self, ob, psys, rman_sg_hair):
        """(Re)build the curve nodes for this hair system and attach the
        particle system's material to the group."""
        if rman_sg_hair.sg_node:
            if rman_sg_hair.sg_node.GetNumChildren() > 0:
                self.clear_children(ob, psys, rman_sg_hair)
        curves = self._get_strands_(ob, psys)
        if not curves:
            return
        for i, (vertsArray, points, widths, scalpST) in enumerate(curves):
            curves_sg = self.rman_scene.sg_scene.CreateCurves("%s-%d" % (rman_sg_hair.db_name, i))
            curves_sg.Define(self.rman_scene.rman.Tokens.Rix.k_cubic, "nonperiodic", "catmull-rom", len(vertsArray), len(points))
            primvar = curves_sg.GetPrimVars()
            primvar.SetPointDetail(self.rman_scene.rman.Tokens.Rix.k_P, points, "vertex")
            primvar.SetIntegerDetail(self.rman_scene.rman.Tokens.Rix.k_Ri_nvertices, vertsArray, "uniform")
            # per-strand index primvar; name is user-configurable
            index_nm = psys.settings.renderman.hair_index_name
            if index_nm == '':
                index_nm = 'index'
            primvar.SetIntegerDetail(index_nm, range(len(vertsArray)), "uniform")
            # widths are a single constant when root and tip radii match,
            # otherwise a per-vertex list
            width_detail = "constant"
            if isinstance(widths, list):
                width_detail = "vertex"
            primvar.SetFloatDetail(self.rman_scene.rman.Tokens.Rix.k_width, widths, width_detail)
            if len(scalpST):
                primvar.SetFloatArrayDetail("scalpST", scalpST, 2, "uniform")
            if rman_sg_hair.motion_steps:
                super().set_primvar_times(rman_sg_hair.motion_steps, primvar)
            curves_sg.SetPrimVars(primvar)
            rman_sg_hair.sg_node.AddChild(curves_sg)
            rman_sg_hair.sg_curves_list.append(curves_sg)
        # Attach material
        mat_idx = psys.settings.material - 1
        if mat_idx < len(ob.material_slots):
            mat = ob.material_slots[mat_idx].material
            material_sg_node = None
            if mat:
                rman_sg_material = self.rman_scene.rman_materials.get(mat.original, None)
                if rman_sg_material:
                    material_sg_node = rman_sg_material.sg_node
            scenegraph_utils.set_material(rman_sg_hair.sg_node, material_sg_node)

    def add_object_instance(self, rman_sg_hair, rman_sg_group):
        """Parent an instance group under this hair group and record it."""
        rman_sg_hair.sg_node.AddChild(rman_sg_group.sg_node)
        rman_sg_hair.instances[rman_sg_group.db_name] = rman_sg_group
        rman_sg_group.rman_sg_group_parent = rman_sg_hair

    def _get_strands_(self, ob, psys):
        """Sample all hair strands of psys into curve batches.

        Returns a list of (nvertices list, points, widths, scalpST) tuples,
        batched so no single batch exceeds ~100000 vertices, or None when the
        particle system is hidden for the current render mode.
        """
        psys_modifier = None
        for mod in ob.modifiers:
            if hasattr(mod, 'particle_system') and mod.particle_system == psys:
                psys_modifier = mod
                break
        # respect viewport/render visibility of the particle modifier
        if self.rman_scene.is_interactive:
            if psys_modifier and not psys_modifier.show_viewport:
                return None
        else:
            if psys_modifier and not psys_modifier.show_render:
                return None
        tip_width = psys.settings.tip_radius * psys.settings.radius_scale
        base_width = psys.settings.root_radius * psys.settings.radius_scale
        # constant width when root and tip radii are equal
        conwidth = (tip_width == base_width)
        if self.rman_scene.is_interactive:
            steps = (2 ** psys.settings.display_step)+1
        else:
            steps = (2 ** psys.settings.render_step)+1
        if conwidth:
            hair_width = base_width
        else:
            hair_width = []
        num_parents = len(psys.particles)
        num_children = len(psys.child_particles)
        if self.rman_scene.is_interactive:
            num_children = int(num_children * psys.settings.display_percentage / 100.0)
        total_hair_count = num_parents + num_children
        # scalp ST export needs the modifier and UVs on the emitter mesh
        export_st = psys.settings.renderman.export_scalp_st and psys_modifier and len(
            ob.data.uv_layers) > 0
        curve_sets = []
        points = []
        vertsArray = []
        scalpST = []
        nverts = 0
        ob_inv_mtx = ob.matrix_world.inverted_safe()
        start_idx = 0
        # when children exist, the parent strands are not rendered
        if psys.settings.child_type != 'NONE' and num_children > 0:
            start_idx = num_parents
        for pindex in range(start_idx, total_hair_count):
            strand_points = []
            # walk through each strand
            for step in range(0, steps):
                pt = psys.co_hair(ob, particle_no=pindex, step=step)
                if pt.length_squared == 0:
                    # this strand ends prematurely
                    break
                # put points in object space
                pt = ob_inv_mtx @ pt
                strand_points.append(pt)
            if len(strand_points) > 1:
                # double the first and last
                strand_points = strand_points[:1] + \
                    strand_points + strand_points[-1:]
                vertsInStrand = len(strand_points)
                # catmull-rom requires at least 4 vertices
                if vertsInStrand < 4:
                    continue
                # for varying width make the width array
                if not conwidth:
                    decr = (base_width - tip_width) / (vertsInStrand - 2)
                    hair_width.extend([base_width] + [(base_width - decr * i)
                                                      for i in range(vertsInStrand - 2)] +
                                      [tip_width])
                # add the last point again
                points.extend(strand_points)
                vertsArray.append(vertsInStrand)
                nverts += vertsInStrand
                # get the scalp ST
                if export_st:
                    particle = psys.particles[
                        (pindex - num_parents) % num_parents]
                    st = psys.uv_on_emitter(psys_modifier, particle=particle, particle_no=pindex)
                    scalpST.append([st[0], st[1]])
                # if we get more than 100000 vertices, export ri.Curve and reset. This
                # is to avoid a maxint on the array length
                if nverts > 100000:
                    curve_sets.append(
                        (vertsArray, points, hair_width, scalpST))
                    nverts = 0
                    points = []
                    vertsArray = []
                    if not conwidth:
                        hair_width = []
                    scalpST = []
        if nverts > 0:
            curve_sets.append((vertsArray, points,
                               hair_width, scalpST))
        return curve_sets
|
prman-pixar/RenderManForBlender | rfb_utils/shadergraph_utils.py | from . import color_utils
from . import filepath_utils
from . import string_utils
from . import object_utils
from . import scene_utils
from ..rman_constants import RMAN_STYLIZED_FILTERS, RMAN_STYLIZED_PATTERNS, RMAN_UTILITY_PATTERN_NAMES, RFB_FLOAT3
import math
import bpy
class BlNodeInfo:
    """Lightweight record tying a scene-graph node to its Blender context.

    Attributes:
        sg_node - the RenderMan scene-graph node
        group_node - the enclosing ShaderNodeGroup, if any
        is_cycles_node - True when the source node is a Cycles node
    """

    def __init__(self, sg_node, group_node=None, is_cycles_node=False):
        self.is_cycles_node = is_cycles_node
        self.group_node = group_node
        self.sg_node = sg_node
class RmanConvertNode:
    """Describes an implicit conversion node (e.g. PxrToFloat3) that must be
    inserted between two linked sockets of mismatched float/float3 types."""

    def __init__(self, node_type, from_node, from_socket, to_node, to_socket):
        self.to_socket = to_socket
        self.to_node = to_node
        self.from_socket = from_socket
        self.from_node = from_node
        self.node_type = node_type
def is_renderman_nodetree(material):
    """Return the material's RendermanOutputNode if it has one, else None."""
    output = find_node(material, 'RendermanOutputNode')
    return output
def find_rman_output_node(nt):
    """Find the active RendermanOutputNode for the material owning node tree nt.

    nt may be a material's own node tree or the tree of a group node embedded
    in a material; in both cases the output node is searched for on the
    owning material's top-level tree.

    Args:
        nt (bpy.types.NodeTree) - node tree to look up

    Returns:
        (bpy.types.Node) - the active output node, any output node when none
        is flagged active, or None when no material uses this tree.
    """
    nodetype = 'RendermanOutputNode'
    ntree = None
    for mat in bpy.data.materials:
        if mat.node_tree is None:
            continue
        if mat.node_tree == nt:
            ntree = mat.node_tree
            break
        for node in mat.node_tree.nodes:
            # check if the node belongs to a group node
            node_tree = getattr(node, 'node_tree', None)
            if node_tree is None:
                continue
            if node_tree == nt:
                ntree = mat.node_tree
    if ntree is None:
        return None
    # bug fix: active_output_node was never initialized, raising
    # UnboundLocalError whenever the first matching node was not the active
    # output, or when no output node exists at all.
    active_output_node = None
    for node in ntree.nodes:
        if getattr(node, "bl_idname", None) == nodetype:
            if getattr(node, "is_active_output", True):
                return node
            if not active_output_node:
                active_output_node = node
    return active_output_node
def is_mesh_light(ob):
    """Check whether ob is a RenderMan mesh light.

    Args:
        ob (bpy.types.Object) - object to check.

    Returns:
        (bpy.types.Node) - the PxrMeshLight node driving ob, or None.
    """
    mat = object_utils.get_active_material(ob)
    if not mat:
        return None
    output = is_renderman_nodetree(mat)
    if not output:
        return None
    if len(output.inputs) > 1:
        light_socket = output.inputs[1]
        if light_socket.is_linked:
            upstream = light_socket.links[0].from_node
            if upstream.bl_label == 'PxrMeshLight':
                return upstream
    return None
def is_rman_light(ob, include_light_filters=True):
    """Check whether ob is a RenderMan light.

    Args:
        ob (bpy.types.Object) - object to check.
        include_light_filters (bool) - also treat light filters as lights

    Returns:
        (bpy.types.Node) - the light's shading node, or None.
    """
    light_node = get_light_node(ob, include_light_filters=include_light_filters)
    return light_node
def get_rman_light_properties_group(ob):
    """Return the RendermanLightSettings property group for ob.

    For LIGHT objects the group lives on the light datablock; for mesh
    lights it lives on the active material.

    Args:
        ob (bpy.types.Object) - object of interest.

    Returns:
        (RendermanLightSettings) - the settings group, or None.
    """
    if ob.type == 'LIGHT':
        return ob.data.renderman
    mat = object_utils.get_active_material(ob)
    if mat:
        return mat.renderman_light
    return None
def get_light_node(ob, include_light_filters=True):
    """Return the shading node for a light object.

    Args:
        ob (bpy.types.Object) - object of interest.
        include_light_filters (bool) - also return light-filter nodes

    Returns:
        (bpy.types.Node) - the light's shading node, or None.
    """
    if ob.type != 'LIGHT':
        # mesh lights carry their node on the material
        return is_mesh_light(ob)
    if not hasattr(ob.data, 'renderman'):
        return None
    if include_light_filters:
        return ob.data.renderman.get_light_node()
    if ob.data.renderman.renderman_light_role == 'RMAN_LIGHT':
        return ob.data.renderman.get_light_node()
    return None
def socket_node_input(nt, socket):
return next((l.from_node for l in nt.links if l.to_socket == socket), None)
def socket_socket_input(nt, socket):
    """Return the upstream socket linked into the given input socket, or None."""
    if not socket.is_linked:
        return None
    for link in nt.links:
        if link.to_socket == socket:
            return link.from_socket
    return None
def get_socket_name(node, socket):
    """Return the RenderMan parameter name for a node socket.

    Args:
        node (bpy.types.Node) - node owning the socket
        socket (bpy.types.NodeSocket or dict) - socket, or a socket
            description dict with a 'name' key

    Returns:
        (str) - parameter name with spaces stripped (None when a cycles
        node cannot be described)
    """
    if type(socket) == dict:
        return socket['name'].replace(' ', '')
    # if this is a renderman node we can just use the socket name,
    # bug fix: this used to call hasattr('node', 'plugin_name') on the
    # string literal 'node', which is always False, so every node was
    # treated as a cycles node.
    if not hasattr(node, 'plugin_name'):
        from .. import rman_bl_nodes
        # cycles node?
        mapping, node_desc = rman_bl_nodes.get_cycles_node_desc(node)
        if node_desc:
            # map the Blender socket index to the node description's
            # corresponding input/output parameter
            idx = -1
            is_output = socket.is_output
            if is_output:
                for i, output in enumerate(node.outputs):
                    if socket.name == output.name:
                        idx = i
                        break
            else:
                for i, input in enumerate(node.inputs):
                    if socket.name == input.name:
                        idx = i
                        break
            if idx == -1:
                return socket.identifier.replace(' ', '')
            if is_output:
                node_desc_param = node_desc.outputs[idx]
            else:
                node_desc_param = node_desc.params[idx]
            return node_desc_param.name
    else:
        # disambiguate sockets that exist as both an input and an output
        if socket.name in node.inputs and socket.name in node.outputs:
            suffix = 'Out' if socket.is_output else 'In'
            return socket.name.replace(' ', '') + suffix
        return socket.identifier.replace(' ', '')
def has_lobe_enable_props(node):
    """Return True if node is a bxdf/pattern type exposing per-lobe enable toggles."""
    lobe_node_ids = {"PxrSurfaceBxdfNode", "PxrLayerPatternOSLNode", "PxrLayerPatternNode"}
    return node.bl_idname in lobe_node_ids
def get_socket_type(node, socket):
    """Map a Blender socket type to its RenderMan parameter type name."""
    remap = {'rgba': 'color', 'value': 'float', 'vector': 'point'}
    sock_type = socket.type.lower()
    return remap.get(sock_type, sock_type)
def get_node_name(node, mat_name):
    """Return a sanitized, material-qualified name for node."""
    raw_name = '%s_%s' % (mat_name, node.name)
    return string_utils.sanitize_node_name(raw_name)
def linked_sockets(sockets):
    """Return only the sockets that are linked; [] when sockets is None."""
    if sockets is None:
        return []
    result = []
    for sock in sockets:
        if sock.is_linked:
            result.append(sock)
    return result
def is_socket_same_type(socket1, socket2):
    '''Compare two NodeSockets to see if they are of the same type. Types that
    are float3 like are considered the same.

    Arguments:
        socket1 (bpy.types.NodeSocket) - first socket to compare
        socket2 (bpy.types.NodeSocket) - second socket to compare

    Returns:
        (bool) - return True if both sockets are the same type
    '''
    if type(socket1) == type(socket2):
        return True
    if is_socket_float_type(socket1) and is_socket_float_type(socket2):
        return True
    return is_socket_float3_type(socket1) and is_socket_float3_type(socket2)
def is_socket_float_type(socket):
    '''Check if socket carries a scalar (int/float) value.

    Arguments:
        socket (bpy.types.NodeSocket) - socket to check

    Returns:
        (bool) - True for scalar sockets
    '''
    renderman_type = getattr(socket, 'renderman_type', None)
    if renderman_type:
        return renderman_type in ('int', 'float')
    return socket.type in ('INT', 'VALUE')
def is_socket_float3_type(socket):
    '''Check if socket carries a float3-like (color/vector/point) value.

    Arguments:
        socket (bpy.types.NodeSocket) - socket to check

    Returns:
        (bool) - True for float3-like sockets
    '''
    renderman_type = getattr(socket, 'renderman_type', None)
    if renderman_type:
        return renderman_type in RFB_FLOAT3
    return socket.type in ('RGBA', 'VECTOR')
# do we need to convert this socket?
def do_convert_socket(from_socket, to_socket):
    """Return True when a float<->float3 conversion node is needed between the sockets."""
    if not to_socket:
        return False
    if is_socket_float_type(from_socket) and is_socket_float3_type(to_socket):
        return True
    return is_socket_float3_type(from_socket) and is_socket_float_type(to_socket)
def find_node_input(node, name):
    """Return the input socket on node whose name matches, or None."""
    for sock in node.inputs:
        if sock.name == name:
            return sock
    return None
def find_node(material, nodetype):
    """Return material's node of the given bl_idname, preferring the active output.

    Returns None when the material is missing, has no node tree, or has no
    matching node.
    """
    if not (material and material.node_tree):
        return None
    fallback = None
    for node in material.node_tree.nodes:
        if getattr(node, "bl_idname", None) != nodetype:
            continue
        if getattr(node, "is_active_output", True):
            return node
        if fallback is None:
            fallback = node
    return fallback
def find_node_from_nodetree(ntree, nodetype):
    """Return the node of the given bl_idname in ntree, preferring the active output."""
    fallback = None
    for node in ntree.nodes:
        if getattr(node, "bl_idname", None) != nodetype:
            continue
        if getattr(node, "is_active_output", True):
            return node
        if fallback is None:
            fallback = node
    return fallback
def find_material_from_nodetree(ntree):
    """Return the material whose node tree is ntree's owning datablock, or None."""
    for candidate in bpy.data.materials:
        if candidate.node_tree == ntree.id_data:
            return candidate
    return None
def is_soloable_node(node):
    """Return True if node can be soloed (a pattern/bxdf, excluding PxrLayer types)."""
    node_type = getattr(node, 'renderman_node_type', '')
    if node_type not in ('pattern', 'bxdf'):
        return False
    return node.bl_label not in ('PxrLayer', 'PxrLayerMixer')
def find_soloable_node(ntree):
    """Return the first selected pattern/bxdf node in ntree that can be soloed."""
    for node in ntree.nodes:
        node_type = getattr(node, 'renderman_node_type', '')
        if not node.select or node_type not in ('pattern', 'bxdf'):
            continue
        # PxrLayer-type nodes cannot be soloed
        if node.bl_label in ('PxrLayer', 'PxrLayerMixer'):
            continue
        return node
    return None
def find_selected_pattern_node(ntree):
    """Return the first selected pattern node in ntree (excluding PxrLayer types)."""
    for node in ntree.nodes:
        if not node.select:
            continue
        if getattr(node, 'renderman_node_type', '') != 'pattern':
            continue
        if node.bl_label in ('PxrLayer', 'PxrLayerMixer'):
            continue
        return node
    return None
# NOTE(review): byte-identical duplicate of find_node_input defined earlier in
# this module; this redefinition harmlessly shadows it and could be removed.
def find_node_input(node, name):
    """Return the input socket on node whose name matches, or None."""
    for input in node.inputs:
        if input.name == name:
            return input
    return None
# walk the tree for nodes to export
def gather_nodes(node):
    """Depth-first collect the upstream shading nodes feeding node.

    Returns the nodes in dependency order (inputs before consumers),
    inserting RmanConvertNode records where a float<->float3 conversion
    (PxrToFloat/PxrToFloat3) must be injected on a link, and skipping
    reroute and Blender group-boundary/output nodes.
    """
    nodes = []
    for socket in node.inputs:
        if socket.is_linked:
            link = socket.links[0]
            # recurse into the upstream subtree first, de-duplicating
            for sub_node in gather_nodes(socket.links[0].from_node):
                if sub_node not in nodes:
                    nodes.append(sub_node)
            # reroute nodes are pass-throughs; no conversion is emitted for them
            if link.from_node.bl_idname == 'NodeReroute':
                continue
            if node.bl_idname == 'NodeReroute':
                continue
            # if this is a float->float3 type or float3->float connections, insert
            # either PxrToFloat3 or PxrToFloat conversion nodes
            if is_socket_float_type(link.from_socket) and is_socket_float3_type(socket):
                convert_node = RmanConvertNode('PxrToFloat3', link.from_node, link.from_socket, link.to_node, link.to_socket)
                if convert_node not in nodes:
                    nodes.append(convert_node)
            elif is_socket_float3_type(link.from_socket) and is_socket_float_type(socket):
                convert_node = RmanConvertNode('PxrToFloat', link.from_node, link.from_socket, link.to_node, link.to_socket)
                if convert_node not in nodes:
                    nodes.append(convert_node)
    # finally add the node itself; output and group-boundary nodes are skipped
    if hasattr(node, 'renderman_node_type') and node.renderman_node_type != 'output':
        nodes.append(node)
    elif not hasattr(node, 'renderman_node_type') and node.bl_idname not in ['ShaderNodeOutputMaterial', 'NodeGroupInput', 'NodeGroupOutput']:
        nodes.append(node)
    return nodes
def gather_all_nodes_for_material(mat, nodes_list):
    """Append every node of mat's tree to nodes_list, prepending group-node internals."""
    for node in mat.node_tree.nodes:
        if node not in nodes_list:
            nodes_list.append(node)
        # internals of a group are pushed to the front of the list
        if node.bl_idname == 'ShaderNodeGroup':
            for inner_node in node.node_tree.nodes:
                nodes_list.insert(0, inner_node)
def get_group_node(node):
    '''
    Find the group node that this NodeGroupOutput or
    NodeGroupInput belongs to

    Returns
        (bpy.types.NodeGroup)
    '''
    current_group_node = None
    # user_map gives every datablock that references this node's tree
    users = bpy.context.blend_data.user_map(subset={node.id_data})
    for group_nt in users[node.id_data]:
        # materials reference the tree directly; only node trees can contain
        # the ShaderNodeGroup wrapper we are looking for
        if isinstance(group_nt, bpy.types.Material):
            continue
        for n in group_nt.nodes:
            if n.bl_idname == 'ShaderNodeGroup':
                for n2 in n.node_tree.nodes:
                    if n2 == node:
                        current_group_node = n
                        break
    return current_group_node
def get_all_shading_nodes():
    '''Find all shading nodes in the scene

    Collects the integrator, display/sample filters, camera projection
    nodes, light nodes, and every RenderMan node in every material
    (descending into node groups).

    Returns:
        (list) - list of all the shading nodes
    '''
    nodes = list()
    context = bpy.context
    scene = context.scene
    world = scene.world
    integrator = find_integrator_node(world)
    if integrator:
        nodes.append(integrator)
    nodes.extend(find_displayfilter_nodes(world))
    nodes.extend(find_samplefilter_nodes(world))
    for cam in bpy.data.cameras:
        n = find_projection_node(cam)
        if n:
            nodes.append(n)
    for light in scene_utils.get_all_lights(scene):
        n = get_light_node(light)
        if n:
            nodes.append(n)
    # recursively collect RenderMan nodes inside group nodes
    def get_group_nodes(group_node, nodes):
        for n in group_node.node_tree.nodes:
            if n.bl_idname == 'ShaderNodeGroup':
                get_group_nodes(n, nodes)
            else:
                rman_type = getattr(n, 'renderman_node_type', None)
                if not rman_type:
                    continue
                if hasattr(n, 'prop_meta'):
                    nodes.append(n)
    for mat in bpy.data.materials:
        if not mat.use_nodes:
            continue
        for n in mat.node_tree.nodes:
            if n.bl_idname == 'ShaderNodeGroup':
                get_group_nodes(n, nodes)
                continue
            rman_type = getattr(n, 'renderman_node_type', None)
            if not rman_type:
                continue
            if hasattr(n, 'prop_meta'):
                nodes.append(n)
    return nodes
def save_bl_ramps(bl_scene):
    '''
    Save all ramps to our custom collection properties

    Copies the points of every color/float ramp living in a node's fake
    node group into the node's corresponding *_bl_ramp collection property,
    so the ramp data is persisted with the node itself.
    (Cleanup: removed unused locals colors/positions/knots/vals/param_widget.)
    '''
    for node in get_all_shading_nodes():
        if not hasattr(node, 'rman_fake_node_group'):
            continue
        for prop_name, meta in node.prop_meta.items():
            param_type = meta['renderman_type']
            if param_type == 'colorramp':
                nt = bpy.data.node_groups.get(node.rman_fake_node_group, None)
                if nt:
                    # the property's value is the name of the hidden ramp node
                    ramp_name = getattr(node, prop_name)
                    color_ramp_node = nt.nodes[ramp_name]
                    bl_ramp_prop = getattr(node, '%s_bl_ramp' % prop_name)
                    bl_ramp_prop.clear()
                    for e in color_ramp_node.color_ramp.elements:
                        r = bl_ramp_prop.add()
                        r.position = e.position
                        r.rman_value = e.color
            elif param_type == 'floatramp':
                nt = bpy.data.node_groups.get(node.rman_fake_node_group, None)
                if nt:
                    ramp_name = getattr(node, prop_name)
                    float_ramp_node = nt.nodes[ramp_name]
                    curve = float_ramp_node.mapping.curves[0]
                    bl_ramp_prop = getattr(node, '%s_bl_ramp' % prop_name)
                    bl_ramp_prop.clear()
                    for p in curve.points:
                        r = bl_ramp_prop.add()
                        r.position = p.location[0]
                        r.rman_value = p.location[1]
def reload_bl_ramps(bl_scene, check_library=True):
    '''
    Reload all ramps from our custom collection properties. We only
    do this if the NodeTree is from a library.
    '''
    for node in get_all_shading_nodes():
        nt = node.id_data
        # by default only linked-in (library) node trees need their ramps rebuilt
        if check_library and not nt.library:
            continue
        if not hasattr(node, 'rman_fake_node_group'):
            continue
        # names of the node's ramp properties, recorded on the node class
        color_rman_ramps = node.__annotations__.get('__COLOR_RAMPS__', [])
        float_rman_ramps = node.__annotations__.get('__FLOAT_RAMPS__', [])
        # (re)create the hidden node group that holds the actual ramp nodes
        node_group = bpy.data.node_groups.get(node.rman_fake_node_group, None)
        if not node_group:
            node_group = bpy.data.node_groups.new(
                node.rman_fake_node_group, 'ShaderNodeTree')
            node_group.use_fake_user = True
        for prop_name in color_rman_ramps:
            prop = getattr(node, prop_name)
            ramp_name = prop
            n = node_group.nodes.get(ramp_name, None)
            if not n:
                n = node_group.nodes.new('ShaderNodeValToRGB')
                n.name = ramp_name
            bl_ramp_prop = getattr(node, '%s_bl_ramp' % prop_name)
            if len(bl_ramp_prop) < 1:
                continue
            # copy saved positions/colors onto the ramp, growing it as needed
            elements = n.color_ramp.elements
            for i in range(0, len(bl_ramp_prop)):
                r = bl_ramp_prop[i]
                if i < len(elements):
                    elem = elements[i]
                    elem.position = r.position
                else:
                    elem = elements.new(r.position)
                elem.color = r.rman_value
            # shrink the ramp if it has more elements than were saved
            if len(bl_ramp_prop) < len(elements):
                for elem in [elements[i] for i in range(len(bl_ramp_prop), len(elements)-1)]:
                    elements.remove(elem)
                # we cannot remove the last element, so
                # just copy the values and remove the second to last
                # element
                last_elem = elements[-1]
                prev_elem = elements[-2]
                last_elem.color = prev_elem.color
                last_elem.position = prev_elem.position
                elements.remove(prev_elem)
        for prop_name in float_rman_ramps:
            prop = getattr(node, prop_name)
            ramp_name = prop
            n = node_group.nodes.get(ramp_name, None)
            if not n:
                n = node_group.nodes.new('ShaderNodeVectorCurve')
                n.name = ramp_name
            bl_ramp_prop = getattr(node, '%s_bl_ramp' % prop_name)
            if len(bl_ramp_prop) < 1:
                continue
            # copy saved knot positions/values onto the curve, growing as needed
            curve = n.mapping.curves[0]
            points = curve.points
            for i in range(0, len(bl_ramp_prop)):
                r = bl_ramp_prop[i]
                if i < len(points):
                    point = points[i]
                    point.location[0] = r.position
                    point.location[1] = r.rman_value
                else:
                    points.new(r.position, r.rman_value)
            # shrink the curve if it has more points than were saved; the last
            # point cannot be removed, so copy values and drop the second-to-last
            if len(bl_ramp_prop) < len(points):
                for elem in [points[i] for i in range(len(bl_ramp_prop), len(points)-1)]:
                    points.remove(elem)
                last_elem = points[-1]
                prev_elem = points[-2]
                last_elem.location[0] = prev_elem.location[0]
                last_elem.location[1] = prev_elem.location[1]
                points.remove(prev_elem)
def is_texture_property(prop_name, meta):
    """Return True when the string property described by meta refers to a
    texture/env/imageplane file path (IES profiles are excluded)."""
    if meta['renderman_type'] != 'string':
        return False
    options = meta['options']
    widget = meta.get('widget', 'default')
    if widget not in ('fileinput', 'assetidinput'):
        return False
    if 'ies' in options:
        return False
    return ('texture' in options) or ('env' in options) or ('imageplane' in options)
def get_rerouted_node(node):
    '''Follow a NodeReroute chain back to the real upstream node.

    Arguments:
        node (bpy.types.Node) - a shader node of type NodeReroute

    Returns:
        (tuple) - (rerouted node, upstream socket), or (None, None) when the
        reroute's input is unconnected.
    '''
    in_socket = node.inputs[0]
    if not in_socket.is_linked:
        return (None, None)
    upstream_link = in_socket.links[0]
    upstream = upstream_link.from_node
    if upstream.bl_idname == 'NodeReroute':
        # keep walking through chained reroutes
        return get_rerouted_node(upstream)
    return (upstream, upstream_link.from_socket)
def find_integrator_node(world):
    '''Find and return the integrator node from the world nodetree

    Arguments:
        world (bpy.types.World) - Blender world object

    Returns:
        (RendermanIntegratorNode) - the integrator ShadingNode, or None
    '''
    # (cleanup: removed unused local 'rm')
    if not world.renderman.use_renderman_node:
        return None
    output = find_node(world, 'RendermanIntegratorsOutputNode')
    if output:
        socket = output.inputs[0]
        if socket.is_linked:
            return socket.links[0].from_node
    return None
def find_displayfilter_nodes(world):
    '''Find and return all display filter nodes from the world nodetree

    Arguments:
        world (bpy.types.World) - Blender world object

    Returns:
        (list) - list of display filter nodes
    '''
    df_nodes = []
    if not world.renderman.use_renderman_node:
        return df_nodes
    output = find_node(world, 'RendermanDisplayfiltersOutputNode')
    if output:
        # each connected input feeds one display filter
        # (cleanup: dropped unused enumerate index)
        for socket in output.inputs:
            if socket.is_linked:
                df_nodes.append(socket.links[0].from_node)
    return df_nodes
def find_samplefilter_nodes(world):
    '''Find and return all sample filter nodes from the world nodetree

    Arguments:
        world (bpy.types.World) - Blender world object

    Returns:
        (list) - list of sample filter nodes
    '''
    sf_nodes = []
    if not world.renderman.use_renderman_node:
        return sf_nodes
    output = find_node(world, 'RendermanSamplefiltersOutputNode')
    if output:
        # each connected input feeds one sample filter
        # (cleanup: dropped unused enumerate index)
        for socket in output.inputs:
            if socket.is_linked:
                sf_nodes.append(socket.links[0].from_node)
    return sf_nodes
def find_projection_node(camera):
    '''Find the projection node, if any

    Arguments:
        camera (bpy.types.Camera or bpy.types.Object) - camera datablock
            or camera object

    Returns:
        (bpy.types.ShaderNode) - projection node, or None
    '''
    projection_node = None
    if isinstance(camera, bpy.types.Camera):
        nt = camera.renderman.rman_nodetree
    else:
        nt = camera.data.renderman.rman_nodetree
    if nt:
        output = find_node_from_nodetree(nt, 'RendermanProjectionsOutputNode')
        # robustness fix: the tree may not contain a projections output node;
        # previously this dereferenced None and raised AttributeError
        if output:
            socket = output.inputs[0]
            if socket.is_linked:
                projection_node = socket.links[0].from_node
    return projection_node
def find_all_stylized_filters(world):
    '''Return the stylized display-filter nodes connected to the world's
    display-filters output node.

    Arguments:
        world (bpy.types.World) - Blender world object

    Returns:
        (list) - stylized filter nodes
    '''
    nodes = list()
    output = find_node(world, 'RendermanDisplayfiltersOutputNode')
    if not output:
        return nodes
    # (cleanup: dropped unused enumerate index)
    for socket in output.inputs:
        if socket.is_linked:
            node = socket.links[0].from_node
            if node.bl_label in RMAN_STYLIZED_FILTERS:
                nodes.append(node)
    return nodes
def has_stylized_pattern_node(ob, node=None):
    """Return the stylized pattern node feeding ob's material, or False.

    If node is None, the search starts from the node wired into the first
    input of the material's RenderMan output node. Each utility-pattern
    property (and each element of array-typed ones) is checked for a link
    from a node whose label is in RMAN_STYLIZED_PATTERNS.
    """
    prop_name = ''
    if not node:
        mat = object_utils.get_active_material(ob)
        if not mat:
            return False
        nt = mat.node_tree
        output = is_renderman_nodetree(mat)
        if not output:
            return False
        socket = output.inputs[0]
        if not socket.is_linked:
            return False
        link = socket.links[0]
        node = link.from_node
    for nm in RMAN_UTILITY_PATTERN_NAMES:
        if hasattr(node, nm):
            prop_name = nm
            prop_meta = node.prop_meta[prop_name]
            if prop_meta['renderman_type'] == 'array':
                # array properties expose one input socket per element
                array_len = getattr(node, '%s_arraylen' % prop_name)
                for i in range(0, array_len):
                    nm = '%s[%d]' % (prop_name, i)
                    sub_prop = getattr(node, nm)
                    if hasattr(node, 'inputs') and nm in node.inputs and \
                            node.inputs[nm].is_linked:
                        to_socket = node.inputs[nm]
                        from_node = to_socket.links[0].from_node
                        if from_node.bl_label in RMAN_STYLIZED_PATTERNS:
                            return from_node
            elif node.inputs[prop_name].is_linked:
                to_socket = node.inputs[prop_name]
                from_node = to_socket.links[0].from_node
                if from_node.bl_label in RMAN_STYLIZED_PATTERNS:
                    return from_node
    return False
def create_pxrlayer_nodes(nt, bxdf):
    """Create a PxrLayerMixer with two PxrLayer inputs and wire it into bxdf."""
    from .. import rman_bl_nodes

    node_map = rman_bl_nodes.__BL_NODES_MAP__
    mixer = nt.nodes.new(node_map["PxrLayerMixer"])
    layer1 = nt.nodes.new(node_map["PxrLayer"])
    layer2 = nt.nodes.new(node_map["PxrLayer"])

    # lay the new nodes out to the left of the bxdf, layers above/below the mixer
    mixer.location = bxdf.location
    mixer.location[0] -= 300
    layer1.location = mixer.location
    layer1.location[0] -= 300
    layer1.location[1] += 300
    layer2.location = mixer.location
    layer2.location[0] -= 300
    layer2.location[1] -= 300

    nt.links.new(mixer.outputs[0], bxdf.inputs[0])
    nt.links.new(layer1.outputs[0], mixer.inputs['baselayer'])
    nt.links.new(layer2.outputs[0], mixer.inputs['layer1'])
def _convert_grease_pencil_stroke_texture(mat, nt, output):
    """Build a PxrConstant (plus an optional PxrTexture/PxrMix network)
    emulating a grease pencil TEXTURE stroke style and wire it into output."""
    from .. import rman_bl_nodes
    gp_mat = mat.grease_pencil
    col = gp_mat.color[:3]
    # col = color_utils.linearizeSRGB(col)
    alpha = gp_mat.color[3]
    bl_image = gp_mat.stroke_image
    bxdf = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrConstant'])
    bxdf.location = output.location
    bxdf.location[0] -= 300
    bxdf.emitColor = col
    bxdf.presence = alpha
    nt.links.new(bxdf.outputs[0], output.inputs[0])
    if not bl_image:
        # no stroke texture assigned: fall back to plain black
        bxdf.emitColor = [0.0, 0.0, 0.0, 1.0]
    else:
        real_file = filepath_utils.get_real_path(bl_image.filepath)
        # manifold reproduces the grease pencil pattern transform
        manifold = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrManifold2D'])
        manifold.angle = -math.degrees(gp_mat.pattern_angle)
        manifold.scaleS = gp_mat.pattern_scale[0]
        manifold.scaleT = gp_mat.pattern_scale[1]
        manifold.offsetS = gp_mat.texture_offset[0]
        manifold.offsetT = gp_mat.texture_offset[1]
        manifold.invertT = 1
        texture = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrTexture'])
        texture.filename = real_file
        texture.linearize = 1
        nt.links.new(manifold.outputs[0], texture.inputs[3])
        # blend the texture with the stroke color by the mix factor
        mix = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrMix'])
        mix.color2 = col
        mix.mix = gp_mat.mix_stroke_factor
        nt.links.new(texture.outputs[0], mix.inputs[0])
        nt.links.new(mix.outputs[0], bxdf.inputs[0])
        nt.links.new(texture.outputs[4], bxdf.inputs[1])
def _convert_grease_pencil_fill_texture(mat, nt, output):
    """Build a PxrConstant (plus an optional PxrTexture/PxrMix network)
    emulating a grease pencil TEXTURE fill style and wire it into output."""
    from .. import rman_bl_nodes
    gp_mat = mat.grease_pencil
    col = gp_mat.fill_color[:3]
    # col = color_utils.linearizeSRGB(col)
    alpha = gp_mat.fill_color[3]
    # NOTE(review): mix_color/mix_alpha are read but unused in this function
    mix_color = gp_mat.mix_color[:3]
    mix_alpha = gp_mat.mix_color[3]
    bxdf = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrConstant'])
    bxdf.location = output.location
    bxdf.location[0] -= 300
    bxdf.emitColor = col
    bxdf.presence = alpha
    nt.links.new(bxdf.outputs[0], output.inputs[0])
    bl_image = gp_mat.fill_image
    if not bl_image:
        # no fill texture assigned: fall back to plain black
        bxdf.emitColor = [0.0, 0.0, 0.0, 1.0]
    else:
        real_file = filepath_utils.get_real_path(bl_image.filepath)
        # manifold reproduces the grease pencil texture transform
        manifold = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrManifold2D'])
        manifold.angle = -math.degrees(gp_mat.texture_angle)
        manifold.scaleS = gp_mat.texture_scale[0]
        manifold.scaleT = gp_mat.texture_scale[1]
        manifold.offsetS = gp_mat.texture_offset[0]
        manifold.offsetT = gp_mat.texture_offset[1]
        manifold.invertT = 1
        texture = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrTexture'])
        texture.filename = real_file
        texture.linearize = 1
        nt.links.new(manifold.outputs[0], texture.inputs[3])
        # blend the texture with the fill color by the mix factor
        mix = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrMix'])
        mix.color2 = col
        mix.mix = gp_mat.mix_factor
        nt.links.new(texture.outputs[0], mix.inputs[0])
        nt.links.new(mix.outputs[0], bxdf.inputs[0])
        nt.links.new(texture.outputs[4], bxdf.inputs[1])
def _convert_grease_pencil_fill_checker(mat, nt, output):
    """Build a PxrChecker-based network emulating a grease pencil CHECKER
    fill style and wire it into output."""
    from .. import rman_bl_nodes
    gp_mat = mat.grease_pencil
    col = gp_mat.fill_color[:3]
    # col = color_utils.linearizeSRGB(col)
    alpha = gp_mat.fill_color[3]
    mix_color = gp_mat.mix_color[:3]
    mix_alpha = gp_mat.mix_color[3]
    bxdf = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrConstant'])
    bxdf.location = output.location
    bxdf.location[0] -= 300
    bxdf.emitColor = col
    bxdf.presence = alpha
    nt.links.new(bxdf.outputs[0], output.inputs[0])
    # manifold reproduces the grease pencil checker grid transform
    manifold = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrManifold2D'])
    manifold.angle = -math.degrees(gp_mat.pattern_angle)
    manifold.scaleS = (1/gp_mat.pattern_gridsize) * gp_mat.pattern_scale[0]
    manifold.scaleT = (1/gp_mat.pattern_gridsize) * gp_mat.pattern_scale[1]
    # first checker drives the color
    checker = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrChecker'])
    checker.colorA = col
    checker.colorB = mix_color
    nt.links.new(manifold.outputs[0], checker.inputs[2])
    # second checker drives the alpha mix
    checker2 = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrChecker'])
    checker2.colorA = col
    checker2.colorB = mix_color
    nt.links.new(manifold.outputs[0], checker2.inputs[2])
    # mix the two alphas according to the checker pattern
    float3_1 = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrToFloat3'])
    float3_1.input = alpha
    float3_2 = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrToFloat3'])
    float3_2.input = mix_alpha
    mix = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrMix'])
    nt.links.new(float3_1.outputs[0], mix.inputs[0])
    nt.links.new(float3_2.outputs[0], mix.inputs[1])
    nt.links.new(checker2.outputs[1], mix.inputs[2])
    nt.links.new(checker.outputs[0], bxdf.inputs[0])
    nt.links.new(mix.outputs[0], bxdf.inputs[1])
def convert_grease_pencil_mat(mat, nt, output):
    """Entry point: convert a grease pencil material into a RenderMan node
    network, dispatching on whether the stroke or the fill is shown and on
    the configured style.
    """
    from .. import rman_bl_nodes

    gp = mat.grease_pencil
    if gp.show_stroke:
        if gp.stroke_style == 'TEXTURE':
            _convert_grease_pencil_stroke_texture(mat, nt, output)
            return
        # solid stroke: a plain constant bxdf with the stroke color/alpha
        stroke_rgb = gp.color[:3]
        # stroke_rgb = color_utils.linearizeSRGB(stroke_rgb)
        stroke_a = gp.color[3]
        constant = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrConstant'])
        constant.location = output.location
        constant.location[0] -= 300
        constant.emitColor = stroke_rgb
        constant.presence = stroke_a
        nt.links.new(constant.outputs[0], output.inputs[0])
    elif gp.show_fill:
        fill_style = gp.fill_style
        if fill_style == 'CHECKER':
            _convert_grease_pencil_fill_checker(mat, nt, output)
        elif fill_style == 'TEXTURE':
            _convert_grease_pencil_fill_texture(mat, nt, output)
        else:
            # solid fill: a plain constant bxdf with the fill color/alpha
            fill_rgb = gp.fill_color[:3]
            # fill_rgb = color_utils.linearizeSRGB(fill_rgb)
            fill_a = gp.fill_color[3]
            mix_rgb = gp.mix_color[:3]
            mix_a = gp.mix_color[3]
            constant = nt.nodes.new(rman_bl_nodes.__BL_NODES_MAP__['PxrConstant'])
            constant.location = output.location
            constant.location[0] -= 300
            constant.emitColor = fill_rgb
            constant.presence = fill_a
            nt.links.new(constant.outputs[0], output.inputs[0])
|
prman-pixar/RenderManForBlender | rfb_utils/color_manager_blender.py | import os
import bpy
import sys
from ..rfb_utils.envconfig_utils import envconfig
try:
from rman_utils.color_manager import ColorManager
except:
ColorManager = None
# module-level singleton; created lazily by init() via color_manager()
__clrmgr__ = None
class ColorManagerBlender(ColorManager if ColorManager is not None else object):
    """Blender-specific color manager that resolves the OCIO config from the
    environment (falling back to Blender's bundled config).

    Bug fix: the class previously inherited directly from ColorManager, which
    raises a TypeError at import time when the rman_utils import failed and
    ColorManager is None. Falling back to object keeps the module importable;
    instances are only created (in init()) when ColorManager actually exists.
    """

    def __init__(self, config_path, **kwargs):
        super(ColorManagerBlender, self).__init__(config_path, **kwargs)

    def update(self):
        # re-resolve $OCIO (or the Blender default) and hand it to the base class
        ociopath = get_env_config_path()
        super(ColorManagerBlender, self).update(ociopath)
def color_manager():
    """Return the ColorManager singleton, creating it lazily on first use."""
    if __clrmgr__ is None:
        init()
    return __clrmgr__
def init():
    """Create the module-level ColorManager singleton (no-op if it exists or
    the ColorManager class could not be imported)."""
    global __clrmgr__
    if __clrmgr__ is not None:
        return
    ociopath = get_env_config_path()
    if ColorManager:
        __clrmgr__ = ColorManagerBlender(ociopath)
def get_env_config_path():
    """Return the OCIO config path, preferring $OCIO over Blender's default."""
    fallback = envconfig().get_blender_ocio_config()
    return envconfig().getenv('OCIO', fallback)
def get_config_path():
    """Return the OCIO config path reported by the color manager, or the
    environment-derived path when no manager is available."""
    mgr = color_manager()
    return mgr.config_file_path() if mgr else get_env_config_path()
def get_colorspace_name():
    """Return the scene colorspace name, refreshing from $OCIO first.

    Returns an empty string when no color manager is available.
    """
    clrmgr = color_manager()
    # consistency fix: guard on the singleton itself (as get_config_path does)
    # rather than the ColorManager class -- equivalent, since the singleton
    # only exists when the class imported successfully
    if clrmgr is not None:
        clrmgr.update()
        return clrmgr.scene_colorspace_name
    return ""
prman-pixar/RenderManForBlender | rman_properties/rman_properties_scene/__init__.py | from bpy.props import PointerProperty, StringProperty, BoolProperty, \
EnumProperty, IntProperty, FloatProperty, \
CollectionProperty
from ...rfb_utils.envconfig_utils import envconfig
from ...rfb_utils.prefs_utils import get_pref
from ...rfb_logger import rfb_log
from ... import rman_render
from ... import rman_bl_nodes
from ...rman_bl_nodes import rman_bl_nodes_props
from ..rman_properties_misc import RendermanLightGroup, RendermanGroup, LightLinking, RendermanUserTokenGroup, RendermanVolumeAggregate
from ..rman_properties_renderlayers import RendermanRenderLayerSettings
from ... import rman_config
from ...rman_config import RmanBasePropertyGroup
import bpy
import os
import sys
class RendermanSceneSettings(RmanBasePropertyGroup, bpy.types.PropertyGroup):
    """Scene-level RenderMan settings, attached as bpy.types.Scene.renderman.

    Config-driven properties are injected by RmanBasePropertyGroup via
    _add_properties() at registration time; the members below add light
    linking/grouping, trace sets, volume aggregates, filename tokens, and
    read-only renderer/license status flags.
    """

    # which rman_config file drives the auto-generated properties
    rman_config_name: StringProperty(name='rman_config_name',
                                     default='rman_properties_scene')

    light_groups: CollectionProperty(type=RendermanLightGroup,
                                     name='Light Groups')
    light_groups_index: IntProperty(min=-1, default=-1)

    light_mixer_groups: CollectionProperty(type=RendermanLightGroup,
                                           name='Light Mixer Groups')
    light_mixer_groups_index: IntProperty(min=-1, default=-1)

    light_links: CollectionProperty(type=LightLinking,
                                    name='Light Links')

    def update_light_link_index(self, context):
        # when the selected light link changes, make its light the active,
        # sole-selected object in the viewport
        scene = context.scene
        rm = scene.renderman
        if rm.light_links_index == -1 or rm.light_links_index >= len(rm.light_links):
            return
        light_links = rm.light_links[rm.light_links_index]
        light_ob = light_links.light_ob
        if context.view_layer.objects.active:
            context.view_layer.objects.active.select_set(False)
        light_ob.select_set(True)
        context.view_layer.objects.active = light_ob

    light_links_index: IntProperty(min=-1, default=-1, update=update_light_link_index)

    def update_scene_solo_light(self, context):
        # propagate solo-light toggles to the live render session
        rr = rman_render.RmanRender.get_rman_render()
        if self.solo_light:
            rr.rman_scene_sync.update_solo_light(context)
        else:
            rr.rman_scene_sync.update_un_solo_light(context)

    solo_light: BoolProperty(name="Solo Light", update=update_scene_solo_light, default=False)

    render_selected_objects_only: BoolProperty(
        name="Only Render Selected",
        description="Render only the selected object(s)",
        default=False)

    external_animation: BoolProperty(
        name="Render Animation",
        description="Spool Animation",
        default=False)

    # Trace Sets
    object_groups: CollectionProperty(
        type=RendermanGroup, name="Trace Sets")
    object_groups_index: IntProperty(min=-1, default=-1)

    # Volume aggregates
    vol_aggregates: CollectionProperty(
        type=RendermanVolumeAggregate, name="Volume Aggregates")
    vol_aggregates_index: IntProperty(min=-1, default=-1)

    # Tokens (used for filename/string expansion)
    version_token: IntProperty(name="version", default=1, min=1)
    take_token: IntProperty(name="take", default=1, min=1)
    blend_token: StringProperty(name="", default="")
    user_tokens: CollectionProperty(type=RendermanUserTokenGroup, name="User Tokens")
    user_tokens_index: IntProperty(min=-1, max=10, default=-1)

    # txmanager (serialized texture manager state)
    txmanagerData: StringProperty(name="txmanagerData", default="")

    # Renderer Status properties
    def get_platform(self):
        # getter for current_platform
        if sys.platform == ("win32"):
            return 'windows'
        elif sys.platform == ("darwin"):
            return 'macOS'
        else:
            return 'linux'

    def get_is_ncr_license(self):
        return envconfig().is_ncr_license

    def get_has_xpu_license(self):
        return envconfig().has_xpu_license

    def get_has_stylized_license(self):
        return envconfig().has_stylized_license

    def get_is_rman_running(self):
        from ...rman_render import RmanRender
        # NOTE: local rman_render deliberately shadows the module import
        rman_render = RmanRender.get_rman_render()
        return rman_render.rman_running

    def get_is_rman_interactive_running(self):
        from ...rman_render import RmanRender
        from ...rfb_utils import scene_utils
        rman_render = RmanRender.get_rman_render()
        # also true while any viewport area is in a shading mode
        is_shading = scene_utils.any_areas_shading()
        return (rman_render.rman_interactive_running or is_shading)

    def get_is_rman_swatch_render_running(self):
        from ...rman_render import RmanRender
        rman_render = RmanRender.get_rman_render()
        return rman_render.rman_swatch_render_running

    def get_is_rman_viewport_rendering(self):
        from ...rman_render import RmanRender
        from ...rfb_utils import scene_utils
        rman_render = RmanRender.get_rman_render()
        is_shading = scene_utils.any_areas_shading()
        return (rman_render.rman_is_viewport_rendering or is_shading)

    def get_light_linking_inverted(self):
        return get_pref('rman_invert_light_linking')

    # read-only status flags, all computed through the getters above
    current_platform: StringProperty(get=get_platform)
    is_ncr_license: BoolProperty(get=get_is_ncr_license)
    has_xpu_license: BoolProperty(get=get_has_xpu_license)
    has_stylized_license: BoolProperty(get=get_has_stylized_license)
    is_rman_running: BoolProperty(get=get_is_rman_running)
    is_rman_interactive_running: BoolProperty(get=get_is_rman_interactive_running)
    is_rman_swatch_render_running: BoolProperty(get=get_is_rman_swatch_render_running)
    is_rman_viewport_rendering: BoolProperty(get=get_is_rman_viewport_rendering)
    invert_light_linking: BoolProperty(get=get_light_linking_inverted)

    # Roz Stats Properties
    def get_roz_stats_progress(self):
        from ...rman_render import RmanRender
        rman_render = RmanRender.get_rman_render()
        return rman_render.stats_mgr._progress

    def get_roz_stats_iterations(self):
        # percentage of the configured max samples completed so far
        from ...rman_render import RmanRender
        stats_mgr = RmanRender.get_rman_render().stats_mgr
        itr = stats_mgr._iterations
        maxSamples = stats_mgr._maxSamples
        if maxSamples <= 0:
            # NOTE(review): returns a float from an IntProperty getter --
            # presumably Blender coerces; confirm
            return 0.0
        frac = itr/maxSamples
        return (frac * 100)

    roz_stats_progress: IntProperty(name='Progress', subtype='PERCENTAGE', min=0, max=100, get=get_roz_stats_progress)
    roz_stats_iterations: IntProperty(name='Iterations', subtype='PERCENTAGE', min=0, max=100, get=get_roz_stats_iterations)
# classes registered/unregistered by this module
classes = [
    RendermanSceneSettings
]
def register():
    """Attach the config-driven properties, register the classes, and hook
    the settings group onto bpy.types.Scene."""
    for klass in classes:
        klass._add_properties(klass, 'rman_properties_scene')
        bpy.utils.register_class(klass)
    bpy.types.Scene.renderman = PointerProperty(
        type=RendermanSceneSettings, name="Renderman Scene Settings")
def unregister():
    """Detach the scene settings pointer and unregister this module's
    classes, tolerating ones that are already gone."""
    del bpy.types.Scene.renderman
    for klass in classes:
        try:
            bpy.utils.unregister_class(klass)
        except RuntimeError:
            rfb_log().debug('Could not unregister class: %s' % str(klass))
|
prman-pixar/RenderManForBlender | rman_properties/rman_properties_volume/__init__.py | <reponame>prman-pixar/RenderManForBlender<filename>rman_properties/rman_properties_volume/__init__.py
from bpy.props import BoolProperty, PointerProperty, StringProperty, EnumProperty
from ...rfb_logger import rfb_log
from ...rman_config import RmanBasePropertyGroup
from ...rfb_utils import filepath_utils
import os
import bpy
class RendermanVolumeGeometrySettings(RmanBasePropertyGroup, bpy.types.PropertyGroup):
    """RenderMan settings attached as bpy.types.Volume.renderman.

    Config-driven properties are injected via _add_properties() at
    registration; the members below expose OpenVDB-specific state.
    """

    def check_openvdb(self):
        """Getter for has_openvdb: True when the active object is a volume
        whose OpenVDB file exists on disk."""
        ob = bpy.context.object
        # NOTE(review): assumes an active object exists; bpy.context.object
        # can be None -- confirm callers only evaluate this with a selection
        if ob.type != 'VOLUME':
            return False
        volume = bpy.context.volume
        openvdb_file = filepath_utils.get_real_path(volume.filepath)
        return os.path.exists(openvdb_file)

    has_openvdb: BoolProperty(name='', get=check_openvdb)

    def get_velocity_grids(self, context):
        """Enum items callback: the volume's vector-typed grids (candidates
        for a velocity grid), prefixed with a 'None' entry."""
        ob = context.object
        items = []
        items.append(('__NONE__', 'None', ''))
        if ob.type != 'VOLUME':
            return items
        volume = ob.data
        # only vector-typed grids can act as velocity grids
        # (dropped the unused enumerate() index from the original loop)
        for grid in volume.grids:
            if grid.data_type in ['VECTOR_FLOAT', 'VECTOR_DOUBLE', 'VECTOR_INT']:
                items.append((grid.name, grid.name, ''))
        return items

    openvdb_velocity_grid_name: EnumProperty(name="Velocity Grid", items=get_velocity_grids)
# classes registered/unregistered by this module
classes = [
    RendermanVolumeGeometrySettings
]
def register():
    """Attach the config-driven properties, register the classes, and hook
    the settings group onto bpy.types.Volume."""
    for cls in classes:
        cls._add_properties(cls, 'rman_properties_volume')
        bpy.utils.register_class(cls)
    # fixed typo in the display name ("Voume" -> "Volume")
    bpy.types.Volume.renderman = PointerProperty(
        type=RendermanVolumeGeometrySettings,
        name="Renderman Volume Geometry Settings")
def unregister():
    """Detach the volume settings pointer and unregister this module's
    classes, tolerating ones that are already gone."""
    del bpy.types.Volume.renderman
    for klass in classes:
        try:
            bpy.utils.unregister_class(klass)
        except RuntimeError:
            rfb_log().debug('Could not unregister class: %s' % str(klass))
prman-pixar/RenderManForBlender | rman_operators/rman_operators_vol_aggregates.py | from bpy.props import StringProperty, BoolProperty, EnumProperty, IntProperty, CollectionProperty, PointerProperty
from ..rfb_utils import string_utils
from ..rfb_logger import rfb_log
from ..rfb_utils import shadergraph_utils
from ..rfb_utils import scenegraph_utils
from ..rfb_utils import object_utils
from ..rfb_utils.scene_utils import RMAN_VOL_TYPES
import bpy
class COLLECTION_OT_volume_aggregates_add_remove(bpy.types.Operator):
    """Add or remove a volume aggregate group.

    Generic collection manipulator: the owning datablock, the collection,
    and its index property are all passed in by name, so the same operator
    can serve different collections.
    """

    bl_label = "Add or Remove Volume Aggregates"
    bl_idname = "renderman.add_remove_volume_aggregates"

    action: EnumProperty(
        name="Action",
        description="Either add or remove properties",
        items=[('ADD', 'Add', ''),
               ('REMOVE', 'Remove', '')],
        default='ADD')
    context: StringProperty(
        name="Context",
        description="Name of context member to find renderman pointer in",
        default="")
    collection: StringProperty(
        name="Collection",
        description="The collection to manipulate",
        default="")
    collection_index: StringProperty(
        name="Index Property",
        description="The property used as a collection index",
        default="")
    defaultname: StringProperty(
        name="Default Name",
        description="Default name to give this collection item",
        default="")

    @classmethod
    def description(cls, context, properties):
        # tooltip depends on which action the button was configured with
        description = "Add a new volume aggregate group"
        if properties.action == "REMOVE":
            description = "Remove the selected volume aggregate group"
        return description

    def invoke(self, context, event):
        # resolve the owner of the collection (renamed from 'id', which
        # shadowed the builtin; also dropped the unused 'scene' local)
        owner = string_utils.getattr_recursive(context, self.properties.context)
        rm = owner.renderman if hasattr(owner, 'renderman') else owner

        prop_coll = self.properties.collection
        coll_idx = self.properties.collection_index
        collection = getattr(rm, prop_coll)
        index = getattr(rm, coll_idx)

        if self.properties.action == 'ADD':
            # make the default name unique within the collection
            dflt_name = self.properties.defaultname
            for coll in collection:
                if coll.name == dflt_name:
                    dflt_name = '%s_NEW' % dflt_name
            collection.add()
            index += 1
            setattr(rm, coll_idx, index)
            collection[-1].name = dflt_name
        elif self.properties.action == 'REMOVE':
            # robustness fix: guard against an empty collection or no
            # selection (index == -1), which previously raised IndexError
            # or removed the last group via negative indexing
            if 0 <= index < len(collection):
                group = collection[index]
                # get a list of all objects in this group
                ob_list = [member.ob_pointer for member in group.members]
                collection.remove(index)
                setattr(rm, coll_idx, index - 1)
                # now tell each object to update
                for ob in ob_list:
                    ob.update_tag(refresh={'OBJECT'})
        return {'FINISHED'}
class PRMAN_OT_add_to_vol_aggregate(bpy.types.Operator):
    """Add object(s) to a volume aggregate group.

    Two modes: add the single object passed through context.selected_obj
    (UI list add button), or add every selected volume object in the scene
    (do_scene_selected).
    """

    bl_idname = 'renderman.add_to_vol_aggregate'
    bl_label = 'Add Selected to Volume Aggregate'

    vol_aggregates_index: IntProperty(default=-1)
    do_scene_selected: BoolProperty(name="do_scene_selected", default=False)
    open_editor: BoolProperty(default=False)

    def add_selected(self, context):
        """Add context.selected_obj to the scene's active aggregate."""
        scene = context.scene
        rm = scene.renderman
        # NOTE(review): this uses the scene's active group index while
        # add_scene_selected() uses the operator property -- confirm intended
        vol_aggregates_index = rm.vol_aggregates_index
        ob = getattr(context, "selected_obj", None)
        if not ob:
            return {'FINISHED'}
        vol_aggregates = scene.renderman.vol_aggregates
        vol_aggregate = vol_aggregates[vol_aggregates_index]
        do_add = True
        for member in vol_aggregate.members:
            if ob == member.ob_pointer:
                do_add = False
                break
        if do_add:
            ob_in_group = vol_aggregate.members.add()
            ob_in_group.name = ob.name
            ob_in_group.ob_pointer = ob
        # bug fix: getattr without a default raises AttributeError when the
        # caller didn't set op_ptr; default to None (the code below already
        # expects a falsy value in that case)
        op = getattr(context, 'op_ptr', None)
        if op:
            op.selected_obj_name = '0'
        ob.update_tag(refresh={'DATA'})

    def add_scene_selected(self, context):
        """Add every selected volume-type object to the aggregate given by
        the operator's vol_aggregates_index property."""
        scene = context.scene
        rm = scene.renderman
        if not hasattr(context, 'selected_objects'):
            return {'FINISHED'}
        vol_aggregates_index = self.properties.vol_aggregates_index
        vol_aggregates = scene.renderman.vol_aggregates
        vol_aggregate = vol_aggregates[vol_aggregates_index]
        for ob in context.selected_objects:
            # only volume-type primitives belong in a volume aggregate
            if object_utils._detect_primitive_(ob) not in RMAN_VOL_TYPES:
                continue
            do_add = True
            for member in vol_aggregate.members:
                if ob == member.ob_pointer:
                    do_add = False
                    break
            if do_add:
                ob_in_group = vol_aggregate.members.add()
                ob_in_group.name = ob.name
                ob_in_group.ob_pointer = ob
                ob.update_tag(refresh={'DATA'})

    def execute(self, context):
        if self.properties.do_scene_selected:
            self.add_scene_selected(context)
        else:
            self.add_selected(context)
        if self.properties.open_editor:
            bpy.ops.scene.rman_open_vol_aggregates_editor('INVOKE_DEFAULT')
        return {'FINISHED'}
class PRMAN_OT_remove_from_vol_aggregate(bpy.types.Operator):
    """Remove the object passed through context.selected_obj from the
    scene's active volume aggregate."""

    bl_idname = 'renderman.remove_from_vol_aggregate'
    bl_label = 'Remove Selected from Volume Aggregate'

    def execute(self, context):
        rm = context.scene.renderman
        target = getattr(context, "selected_obj", None)
        if not target:
            return {'FINISHED'}
        aggregate = rm.vol_aggregates[rm.vol_aggregates_index]
        # find the member pointing at the target object and drop it
        for idx, member in enumerate(aggregate.members):
            if member.ob_pointer == target:
                aggregate.members.remove(idx)
                target.update_tag(refresh={'OBJECT'})
                break
        return {'FINISHED'}
# classes registered/unregistered by this module
classes = [
    COLLECTION_OT_volume_aggregates_add_remove,
    PRMAN_OT_add_to_vol_aggregate,
    PRMAN_OT_remove_from_vol_aggregate
]
def register():
    """Register this module's operator classes with Blender."""
    for klass in classes:
        bpy.utils.register_class(klass)
def unregister():
    """Unregister this module's classes, tolerating already-unregistered ones."""
    for klass in classes:
        try:
            bpy.utils.unregister_class(klass)
        except RuntimeError:
            rfb_log().debug('Could not unregister class: %s' % str(klass))
prman-pixar/RenderManForBlender | rman_operators/rman_operators_editors/rman_operators_editors_vol_aggregates.py | <gh_stars>100-1000
from bpy.props import (StringProperty, BoolProperty, EnumProperty, IntProperty)
from ...rman_ui.rman_ui_base import CollectionPanel
from ...rfb_logger import rfb_log
from ...rman_operators.rman_operators_collections import return_empty_list
from ...rman_config import __RFB_CONFIG_DICT__ as rfb_config
from ...rfb_utils import object_utils
from ...rfb_utils import scene_utils
import bpy
import re
class RENDERMAN_UL_Volume_Aggregates_List(bpy.types.UIList):
    """UIList row for a volume aggregate member: a remove button followed by
    the object's name."""

    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        custom_icon = 'OBJECT_DATAMODE'
        # expose the member's object so the remove operator can find it
        layout.context_pointer_set("selected_obj", item.ob_pointer)
        # the returned operator properties are not needed; dropped the unused binding
        layout.operator('renderman.remove_from_vol_aggregate', text='', icon='REMOVE')
        label = item.ob_pointer.name
        layout.label(text=label, icon=custom_icon)
class PRMAN_OT_Renderman_Open_Volume_Aggregates_Editor(CollectionPanel, bpy.types.Operator):
    """Modal dialog for creating/removing volume aggregates and managing
    their object membership."""

    bl_idname = "scene.rman_open_vol_aggregates_editor"
    bl_label = "RenderMan Volume Aggregates Editor"
    bl_description = "Volume Aggregates Editor"

    def updated_object_selected_name(self, context):
        # update callback: make the chosen object the active, sole selection
        ob = context.scene.objects.get(self.selected_obj_name, None)
        if not ob:
            return
        if context.view_layer.objects.active:
            context.view_layer.objects.active.select_set(False)
        ob.select_set(True)
        context.view_layer.objects.active = ob

    def obj_list_items(self, context):
        # enum items callback: volume objects not already in the active
        # group, optionally filtered by the search pattern
        pattern = re.compile(self.object_search_filter)
        scene = context.scene
        rm = scene.renderman

        if self.do_object_filter and self.object_search_filter == '':
            return return_empty_list(label='No Objects Found')

        group = rm.vol_aggregates[rm.vol_aggregates_index]
        objs_in_group = []
        for member in group.members:
            objs_in_group.append(member.ob_pointer.name)

        items = []
        for ob in scene_utils.get_all_volume_objects(scene):
            ob_name = ob.name
            if ob_name not in objs_in_group:
                if self.do_object_filter and not re.match(pattern, ob_name):
                    continue
                items.append((ob_name, ob_name, ''))
        if not items:
            return return_empty_list(label='No Objects Found')
        elif self.do_object_filter:
            items.insert(0, ('0', 'Results (%d)' % len(items), '', '', 0))
        else:
            items.insert(0, ('0', 'Select Object', '', '', 0))
        return items

    def update_do_object_filter(self, context):
        # reset the selector whenever the filter mode is toggled
        self.selected_obj_name = '0'

    do_object_filter: BoolProperty(name="Object Filter",
                                   description="Search and add multiple objects",
                                   default=False,
                                   update=update_do_object_filter)
    object_search_filter: StringProperty(name="Object Filter Search", default="")
    selected_obj_name: EnumProperty(name="", items=obj_list_items, update=updated_object_selected_name)

    def execute(self, context):
        return {'FINISHED'}

    def draw(self, context):
        layout = self.layout
        scene = context.scene
        rm = scene.renderman
        layout.separator()
        # fixed typo in the default group name ("Aggreagte" -> "Aggregate")
        self._draw_collection(context, layout, rm, "Volume Aggregates",
                              "renderman.add_remove_volume_aggregates",
                              "scene.renderman",
                              "vol_aggregates", "vol_aggregates_index",
                              default_name='VolumeAggregate_%d' % len(rm.vol_aggregates))

    def draw_objects_item(self, layout, context, item):
        row = layout.row()
        scene = context.scene
        rm = scene.renderman
        vol_aggregate = rm.vol_aggregates[rm.vol_aggregates_index]

        row = layout.row()
        row.separator()
        row.prop(self, 'do_object_filter', text='', icon='FILTER', icon_only=True)
        # the two branches below differ only in how the object selector is
        # presented (plain dropdown vs. search field + results dropdown)
        if not self.do_object_filter:
            row.prop(self, 'selected_obj_name', text='')
            col = row.column()
            if self.selected_obj_name == '0' or self.selected_obj_name == '':
                col.enabled = False
                op = col.operator("renderman.add_to_vol_aggregate", text='', icon='ADD')
                op.open_editor = False
            else:
                col.context_pointer_set('op_ptr', self)
                col.context_pointer_set('selected_obj', scene.objects[self.selected_obj_name])
                op = col.operator("renderman.add_to_vol_aggregate", text='', icon='ADD')
                op.vol_aggregates_index = rm.vol_aggregates_index
                op.do_scene_selected = False
                op.open_editor = False
        else:
            row.prop(self, 'object_search_filter', text='', icon='VIEWZOOM')
            row = layout.row()
            row.prop(self, 'selected_obj_name')
            col = row.column()
            if self.selected_obj_name == '0' or self.selected_obj_name == '':
                col.enabled = False
                op = col.operator("renderman.add_to_vol_aggregate", text='', icon='ADD')
                op.open_editor = False
            else:
                col.context_pointer_set('op_ptr', self)
                col.context_pointer_set('selected_obj', scene.objects[self.selected_obj_name])
                op = col.operator("renderman.add_to_vol_aggregate", text='', icon='ADD')
                op.vol_aggregates_index = rm.vol_aggregates_index
                op.do_scene_selected = False
                op.open_editor = False

        row = layout.row()
        row.template_list('RENDERMAN_UL_Volume_Aggregates_List', "",
                          vol_aggregate, "members", vol_aggregate, 'members_index', rows=6)

    def draw_item(self, layout, context, item):
        self.draw_objects_item(layout, context, item)

    def cancel(self, context):
        # reopen the dialog when it was dismissed by a left click, keeping
        # the editor up while the user works in the viewport
        if self.event and self.event.type == 'LEFTMOUSE':
            bpy.ops.scene.rman_open_vol_aggregates_editor('INVOKE_DEFAULT')

    def __init__(self):
        self.event = None

    def invoke(self, context, event):
        wm = context.window_manager
        width = rfb_config['editor_preferences']['vol_aggregates_editor']['width']
        self.event = event
        return wm.invoke_props_dialog(self, width=width)
# classes registered/unregistered by this module
classes = [
    PRMAN_OT_Renderman_Open_Volume_Aggregates_Editor,
    RENDERMAN_UL_Volume_Aggregates_List
]
def register():
    """Register this module's editor classes with Blender."""
    for klass in classes:
        bpy.utils.register_class(klass)
def unregister():
    """Unregister this module's classes, tolerating already-unregistered ones."""
    for klass in classes:
        try:
            bpy.utils.unregister_class(klass)
        except RuntimeError:
            rfb_log().debug('Could not unregister class: %s' % str(klass))
MikeVald/CentroMultimedia | CentroMultimedia.py | import Tkinter as tk
import ttk
import PIL
from PIL import Image
from PIL import ImageTk
from vlc import Instance
import time
import os
import sys
#import pathlib
import webbrowser
class VLC:
    """Wrapper around a python-vlc media-list player.

    Scans a fixed media directory, builds a looping playlist filtered by
    media type, and exposes basic transport controls.
    """

    def __init__(self):
        # '--loop' makes playlists repeat
        self.Player = Instance('--loop')

    def addPlaylist(self, op):
        """Rebuild the playlist for the requested media type.

        op: 1 = photos (.jpg/.png/.jpeg), 2 = videos (.mp4/.avi),
            3 = music (.mp3)
        """
        self.mediaList = self.Player.media_list_new()  # fresh, empty playlist
        path = r"/home/pi/Desktop/PFE"
        songs = os.listdir(path)  # every file in the media directory
        # accepted extensions per option
        wanted = {
            1: ('.jpg', '.png', '.jpeg'),
            2: ('.mp4', '.avi'),
            3: ('.mp3',),
        }.get(op, ())
        for s in songs:
            # bug fix: match on the real file extension (case-insensitive)
            # instead of a substring anywhere in the name, which also matched
            # files like 'png_notes.txt'
            if os.path.splitext(s)[1].lower() in wanted:
                self.mediaList.add_media(self.Player.media_new(os.path.join(path, s)))
        # replace the player's playlist with the new one
        self.listPlayer = self.Player.media_list_player_new()
        self.listPlayer.set_media_list(self.mediaList)

    def play(self):
        """Start playback."""
        self.listPlayer.play()

    def next(self):
        """Skip to the next item."""
        self.listPlayer.next()

    def pause(self):
        """Pause playback."""
        self.listPlayer.pause()

    def previous(self):
        """Go back to the previous item."""
        self.listPlayer.previous()

    def stop(self):
        """Stop playback."""
        self.listPlayer.stop()

    def playpause(self):
        """Toggle playback: stop when playing, otherwise start."""
        if self.listPlayer.is_playing():
            self.stop()
        else:
            self.play()
def repMusica(op):
    """Build and play the playlist for the chosen media type.

    op: 1 = photos, 2 = videos, 3 = music.
    """
    player.addPlaylist(op)  # build the playlist
    player.play()           # start playback
    # bug fix: the VLC wrapper has no is_playing() method -- calling
    # player.is_playing() raised AttributeError; poll the underlying
    # media-list player instead, as playpause() does
    while player.listPlayer.is_playing():
        time.sleep(1)
    player.next()   # advance to the next item
    # NOTE(review): the 9s dwell presumably gives photos their display
    # time -- confirm against the slideshow behavior
    time.sleep(9)
win = tk.Tk()  # create the main window
win.title("ProyectoEmbebidos")
win.geometry("1000x800")  # size the window
# NOTE(review): pack() returns None, so lab is always None -- harmless here
lab=ttk.Label(win, text="Bienvenido", font=('Helvetica', 18, "bold")).pack(side="top", fill="x")
style = ttk.Style()
try:  # notebook tab styling
    style.theme_create( "yumi", parent="alt", settings={
        "TNotebook": {"configure": {"tabmargins": [2, 5, 2, 0] } },
        "TNotebook.Tab": {
            "configure": {"padding": [5, 1],"font" : ('URW Gothic L', '11', 'bold'), "background": "#CCD1D1" },
            "map": {"background": [("selected", "#D6DBDF")],
                    "expand": [("selected", [1, 1, 1, 0])] } } } )
    style.theme_use("yumi")
except:
    # theme_create raises if "yumi" already exists (e.g. on re-run); just use it
    style.theme_use("yumi")

#Create Tab Control
tabControl=ttk.Notebook(win)
#Tab1
Video=tk.Frame(tabControl,bg='#AAB7B8')
tabControl.add(Video,text='Videos Online')
#Tab2
Musica=tk.Frame(tabControl,bg='#AAB7B8')
tabControl.add(Musica, text='Musica Online')
#Tab3
USB=tk.Frame(tabControl,bg='#AAB7B8')
tabControl.add(USB, text="Contenido de USB")
tabControl.pack(expand=1, fill="both")

# Online video tab: one logo button per streaming service
netflix = Image.open('netflix.png')  # load the logo
netflix = netflix.resize((220,120),Image.ANTIALIAS)
netflix = ImageTk.PhotoImage(netflix)  # wrap as a Tk-compatible image
ttk.Button(Video,image=netflix,
    command=lambda : webbrowser.open("http://www.netflix.com", new=2, autoraise=True)).pack(padx=20,side="left")  # Netflix button

prime = Image.open('prime.png')
prime = prime.resize((220,120),Image.ANTIALIAS)
prime = ImageTk.PhotoImage(prime)
ttk.Button(Video,image=prime,
    command=lambda : webbrowser.open("http://www.primevideo.com", new=2, autoraise=True)).pack(padx=10,side="left")  # Prime Video button

blim = Image.open('blim.png')
blim = blim.resize((220,120),Image.ANTIALIAS)
blim = ImageTk.PhotoImage(blim)
ttk.Button(Video,image=blim,
    command=lambda : webbrowser.open("http://www.blim.com", new=2, autoraise=True)).pack(padx=10,side="left")  # Blim button

# Online music tab
spotify = Image.open('spotify.png')
spotify = spotify.resize((220,120),Image.ANTIALIAS)
spotify = ImageTk.PhotoImage(spotify)
ttk.Button(Musica,image=spotify,
    command=lambda : webbrowser.open("http://www.spotify.com", new=2, autoraise=True)).pack(padx=20,side="left")  # Spotify button

deezer = Image.open('deezer.png')
deezer = deezer.resize((220,120),Image.ANTIALIAS)
deezer = ImageTk.PhotoImage(deezer)
ttk.Button(Musica,image=deezer,
    command=lambda : webbrowser.open("http://www.deezer.com", new=2, autoraise=True)).pack(padx=10,side="left")  # Deezer button

# USB tab: play local media through the VLC wrapper
player = VLC()

fotos = Image.open('fotos.png')
fotos = fotos.resize((220,120),Image.ANTIALIAS)
fotos = ImageTk.PhotoImage(fotos)
ttk.Button(USB,image=fotos,
    command=lambda : repMusica(1)).pack(padx=20,side="left")  # play photos

videos = Image.open('videos.png')
videos = videos.resize((220,120),Image.ANTIALIAS)
videos = ImageTk.PhotoImage(videos)
ttk.Button(USB,image=videos,
    command=lambda : repMusica(2)).pack(padx=10,side="left")  # play videos

musica = Image.open('musica.png')
musica = musica.resize((220,120),Image.ANTIALIAS)
musica = ImageTk.PhotoImage(musica)
ttk.Button(USB,image=musica,
    command=lambda : repMusica(3)).pack(padx=10,side="left")  # play music

# transport control buttons
ttk.Button(USB,text="Prev",
    command=lambda : player.previous()).place(x=20, y=400)
ttk.Button(USB,text="PAUSE/PLAY",
    command=lambda : player.playpause()).place(x=70, y=400)
ttk.Button(USB,text="STOP",
    command=lambda : player.stop()).place(x=170, y=400)
ttk.Button(USB,text="Next",
    command=lambda : player.next()).place(x=230, y=400)

# run the Tk event loop
win.mainloop()
|
lijun99/altar | cuda/cuda/bayesian/cudaCoolingStep.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
import altar.cuda
# declaration
class cudaCoolingStep:
    """
    Encapsulation of the state of the calculation at some particular β value
    """

    # public data
    beta = None # the inverse temperature
    theta = None # a (samples x parameters) matrix
    prior = None # a (samples) vector with logs of the sample likelihoods
    data = None # a (samples) vector with the logs of the data likelihoods given the samples
    posterior = None # a (samples) vector with the logs of the posterior likelihood

    # read-only public data
    @property
    def samples(self):
        """
        The number of samples
        """
        # encoded in θ
        return self.theta.shape[0]

    @property
    def parameters(self):
        """
        The number of model parameters
        """
        # encoded in θ
        return self.theta.shape[1]

    # factories
    @classmethod
    def start(cls, annealer):
        """
        Build the first cooling step by asking {model} to produce a sample set from its
        initializing prior, compute the likelihood of this sample given the data, and compute a
        (perhaps trivial) posterior
        """
        # get the model
        model = annealer.model
        # chain count and GPU float precision come from the job spec
        samples = model.job.chains
        precision = model.job.gpuprecision
        # build an uninitialized step
        step = cls.alloc(samples=samples, parameters=model.parameters, dtype=precision)
        # run model here is a bad idea, moved to cudaannealing
        # initialize it
        # model.cuInitSample(theta=step.theta)
        # compute the likelihoods
        #model.updateModel(annealer=annealer)
        #model.likelihoods(annealer=annealer, step=step, batch=samples)
        # return the initialized state
        return step

    @classmethod
    def alloc(cls, samples, parameters, dtype):
        """
        Allocate storage for the parts of a cooling step
        """
        # dtype must be given to avoid unmatched precisions
        # allocate the initial sample set
        theta = altar.cuda.matrix(shape=(samples, parameters), dtype=dtype).zero()
        # allocate the likelihood vectors
        prior = altar.cuda.vector(shape=samples, dtype=dtype).zero()
        data = altar.cuda.vector(shape=samples, dtype=dtype).zero()
        posterior = altar.cuda.vector(shape=samples, dtype=dtype).zero()
        # build one of my instances and return it
        return cls(beta=0, theta=theta, likelihoods=(prior, data, posterior))

    # interface
    def clone(self):
        """
        Make a new step with a duplicate of my state
        """
        # make copies of my state
        beta = self.beta
        theta = self.theta.clone()
        likelihoods = self.prior.clone(), self.data.clone(), self.posterior.clone()
        # make one and return it
        return type(self)(beta=beta, theta=theta, likelihoods=likelihoods)

    def computePosterior(self, batch=None):
        """
        (Re-)Compute the posterior from prior, data, and (updated) beta
        """
        # default to the full sample set
        batch = batch if batch is not None else self.samples
        # copy prior to posterior
        self.posterior.copy(self.prior)
        # add beta*dataLikelihood: log posterior = log prior + β·log data-likelihood
        altar.cuda.cublas.axpy(alpha=self.beta, x=self.data, y=self.posterior, batch=batch)
        # all done
        return self

    def copyFromCPU(self, step):
        """
        Copy cpu step to gpu step
        """
        self.beta = step.beta
        self.theta.copy_from_host(source=step.theta)
        self.prior.copy_from_host(source=step.prior)
        self.data.copy_from_host(source=step.data)
        self.posterior.copy_from_host(source=step.posterior)
        return self

    def copyToCPU(self, step):
        """
        copy gpu step to cpu step
        """
        step.beta = self.beta
        self.theta.copy_to_host(target=step.theta)
        self.prior.copy_to_host(target=step.prior)
        self.data.copy_to_host(target=step.data)
        self.posterior.copy_to_host(target=step.posterior)
        return self

    # meta-methods
    def __init__(self, beta, theta, likelihoods, **kwds):
        # chain up
        super().__init__(**kwds)
        # store the temperature
        self.beta = beta
        # store the sample set
        self.theta = theta
        # store the likelihoods
        self.prior, self.data, self.posterior = likelihoods
        # all done
        return

    # local
    precision = None
# end of file
|
lijun99/altar | models/seismic/seismic/cuda/cudaCascaded.py | <gh_stars>1-10
# -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
import altar.cuda
from altar.cuda import cublas
from altar.cuda import libcuda
from altar.cuda.models.cudaBayesianEnsemble import cudaBayesianEnsemble
# declaration
class cudaCascaded(cudaBayesianEnsemble, family="altar.models.seismic.cuda.cascaded"):
    """
    Cascaded inversion of a model ensemble

    A declaration-only specialization of {cudaBayesianEnsemble}: all behavior is
    inherited; this class only introduces the configuration family name under
    which a cascaded seismic inversion is assembled
    """
# end of file
|
lijun99/altar | models/seismic/seismic/cuda/__init__.py | <reponame>lijun99/altar<gh_stars>1-10
# -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# the package
import altar
import altar.cuda
# publish the protocol for probability distributions
from altar.cuda.distributions import cudaDistribution as distribution
from altar.cuda.models.cudaBayesian import cudaBayesian as model
# implementations
@altar.foundry(implements=distribution, tip="the Moment Magnitude distribution")
def moment():
    # pull in the component class
    from .cudaMoment import cudaMoment
    # adopt its docstring
    __doc__ = cudaMoment.__doc__
    # and publish the factory
    return cudaMoment
# implementations
@altar.foundry(implements=model, tip="static inversion model")
def static():
    # pull in the component class
    from .cudaStatic import cudaStatic
    # adopt its docstring
    __doc__ = cudaStatic.__doc__
    # and publish the factory
    return cudaStatic
# implementations
@altar.foundry(implements=model, tip="static inversion model with Cp")
def staticcp():
    # pull in the component class
    from .cudaStaticCp import cudaStaticCp
    # adopt its docstring
    __doc__ = cudaStaticCp.__doc__
    # and publish the factory
    return cudaStaticCp
# implementations
@altar.foundry(implements=model, tip="kinematic inversion model")
def kinematicg():
    # pull in the component class
    from .cudaKinematicG import cudaKinematicG
    # adopt its docstring
    __doc__ = cudaKinematicG.__doc__
    # and publish the factory
    return cudaKinematicG
# implementations
@altar.foundry(implements=model, tip="cascaded kinematic inversion model")
def cascaded():
    # pull in the component class
    from .cudaCascaded import cudaCascaded
    # adopt its docstring
    __doc__ = cudaCascaded.__doc__
    # and publish the factory
    return cudaCascaded
# end of file
|
lijun99/altar | cuda/cuda/models/cudaParameterEnsemble.py | <gh_stars>1-10
# -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# currently not used; use psets in cudaBayesian instead
# the package
import altar
import altar.cuda
# the protocol
from .cudaParameter import cudaParameter
# component
class cudaParameterEnsemble(cudaParameter, family="altar.models.parameters.cudaensemble"):
    """
    An ensemble of parameter sets

    Delegates initialization, sample generation, prior evaluation, and sample
    verification to every member parameter set in {psets}
    """

    # user configurable state
    psets = altar.properties.dict(schema=altar.cuda.models.parameters())
    psets.doc = "an ensemble of parameter sets in the model"

    # interface
    @altar.export
    def initialize(self, application):
        """
        Initialize my distributions; returns the total parameter count
        """
        # delegate to the cuda initializer
        count = self.cuInitialize(application=application)
        return count

    def cuInitialize(self, application):
        """
        cuda initialize: set up each member parameter set and tally the total
        number of parameters
        """
        # get the parameter sets
        psets = self.psets
        # initialize the running parameter count
        parameters = 0
        # go through my parameter sets
        for name, pset in psets.items():
            # initialize the parameter set at its recorded offset
            parameters += pset.cuInitialize(application=application, offset=pset.offset)
        # the total number of parameters is now known, so record it
        self.parameters = parameters
        # return my parameter count so the next set can be initialized properly
        return parameters

    def cuInitSample(self, theta, batch=None):
        """
        Fill {theta} with an initial random sample from my prior distribution.
        """
        # ask my subsets
        for pset in self.psets.values():
            # to fill their portion of the sample from their {prep} distribution
            # bug fix: was {theta=θ}, a reference to an undefined name
            pset.prep.cuInitSample(theta=theta, batch=batch)
        # all done
        return self

    def cuEvalPrior(self, theta, prior, batch=None):
        """
        Fill {prior} with the log likelihoods of the samples in {theta} in my prior distribution
        """
        # ask my subsets
        for pset in self.psets.values():
            # to fold in the log-likelihood of their portion of the sample
            # bug fix: was {theta=θ}, a reference to an undefined name
            pset.prior.cuEvalPrior(theta=theta, prior=prior, batch=batch)
        # all done
        return self

    def cuVerify(self, theta, mask, batch=None):
        """
        Check whether the samples in {theta} are consistent with the model requirements and
        update the {mask}, a vector with zeroes for valid samples and non-zero for invalid ones
        """
        # ask my subsets
        for pset in self.psets.values():
            # and ask each one to verify the sample
            # bug fix: was {theta=θ}, a reference to an undefined name
            pset.prior.cuVerify(theta=theta, mask=mask, batch=batch)
        # all done; return the rejection map
        return mask
# end of file
|
lijun99/altar | models/seismic/examples/utils/meanModelKinematic.py | <reponame>lijun99/altar
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Author(s): <NAME>
import h5py
import numpy
def MeanModel():
    """
    Compute the mean model of the AlTar step results

    Converts the AlTar2 output ('step_final.h5') to the theta matrix format of
    AlTar-1.1 ('step_final_v1.h5'), then saves the posterior mean and standard
    deviation of each parameter to 'theta_mean.txt'/'theta_std.txt'
    """
    # the parameter sets, in the same order as in the model pfg file
    psets_list = ['strikeslip', 'dipslip', 'risetime', 'rupturevelocity', 'hypocenter']
    # open input/output; context managers close the files even if a dataset is missing
    # (the originals leaked handles on error and shadowed the {input} builtin)
    with h5py.File('step_final.h5', 'r') as fin, h5py.File('step_final_v1.h5', 'w') as fout:
        # probe the first parameter set for the number of samples and the dtype
        first = numpy.asarray(fin.get('ParameterSets/' + psets_list[0]))
        samples = first.shape[0]
        # concatenate all parameter sets into one (samples x parameters) matrix
        theta = numpy.empty(shape=(samples, 0), dtype=first.dtype)
        for pset_name in psets_list:
            pset = numpy.array(fin.get('ParameterSets/' + pset_name))
            theta = numpy.concatenate((theta, pset), axis=1)
        fout.create_dataset('Sample Set', data=theta)
        print("converted sample set of size:", theta.shape)
        # convert bayesian prob/llk
        prior = numpy.asarray(fin.get('Bayesian/prior'))
        fout.create_dataset('Prior Log-likelihood', data=prior)
        data = numpy.asarray(fin.get('Bayesian/likelihood'))
        fout.create_dataset('Data Log-likelihood', data=data)
        posterior = numpy.asarray(fin.get('Bayesian/posterior'))
        fout.create_dataset('Posterior Log-likelihood', data=posterior)
        print("converted likelihood")
        # convert the covariance matrix and the annealing temperature
        covariance = numpy.asarray(fin.get('Annealer/covariance'))
        fout.create_dataset('Covariance', data=covariance)
        beta = numpy.asarray(fin.get('Annealer/beta'))
        fout.create_dataset('Beta', data=beta)
        print("converted covariance matrix")
    # compute the mean model and its spread across samples
    mean = theta.mean(axis=0)
    std = theta.std(axis=0)
    print("Mean model and std are saved to text files. Here are the first 10 parameters ... ")
    for i in range(min(10, mean.size)):
        print(f"{i}: ({mean[i]} +/- {std[i]})")
    numpy.savetxt("theta_mean.txt", mean)
    numpy.savetxt("theta_std.txt", std)
# run the conversion when invoked as a script
if __name__ == "__main__":
    MeanModel()
|
lijun99/altar | models/regression/examples/synthetic/Linear.py | # Linear Regression synthetic example data generator
import numpy as np
import matplotlib.pyplot as plt

# sample size and the ground-truth line parameters for y = slope*x + intercept
n = 200
intercept = 1
slope = 2

# abscissae: evenly spaced points on [0, 1]
x = np.linspace(0, 1, n)
# the noise-free regression line
trend = slope * x + intercept
# observations: corrupt the line with iid gaussian noise
y = trend + np.random.normal(scale=.2, size=n)

# save the synthetic data set for the inversion
np.savetxt('x.txt', x)
np.savetxt('y.txt', y)

# visualize the generated sample against the underlying model
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111, xlabel='x', ylabel='y', title='Generated data and underlying model')
ax.plot(x, y, 'x', label='sampled data')
ax.plot(x, trend, label='true regression line', lw=2.)
plt.legend(loc=0)
plt.show()
|
lijun99/altar | models/seismic/seismic/__init__.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
# publish the protocol for probability distributions
from altar.distributions import Distribution as distribution
# implementations
@altar.foundry(implements=distribution, tip="the Moment Magnitude distribution")
def moment():
    # pull in the component class
    from .Moment import Moment
    # adopt its docstring
    __doc__ = Moment.__doc__
    # and publish the factory
    return Moment
# implementations
@altar.foundry(implements=altar.models.model, tip="static inversion model")
def static():
    # pull in the component class
    from .Static import Static
    # adopt its docstring
    __doc__ = Static.__doc__
    # and publish the factory
    return Static
# implementations
@altar.foundry(implements=altar.models.model, tip="static inversion model with Cp")
def staticCp():
    # pull in the component class
    from .StaticCp import StaticCp
    # adopt its docstring
    __doc__ = StaticCp.__doc__
    # and publish the factory
    return StaticCp
# end of file
|
lijun99/altar | altar/tests/altar/application_run.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# bootstrap
if __name__ == "__main__":
    # get the class
    from altar.shells import application
    # instantiate the app under the {catmip} configuration name
    app = application(name="catmip")
    # grab the journal
    import journal
    # deactivate the informational channel
    journal.info("altar").deactivate()
    # run it
    status = app.run()
    # and share the exit code
    raise SystemExit(status)
# end of file
|
lijun99/altar | models/gaussian/gaussian/Gaussian.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# externals
import math
# the package
import altar
# declaration
class Gaussian(altar.models.bayesian, family="altar.models.gaussian"):
    """
    A model that emulates the probability density for a single observation of the model
    parameters. The observation is treated as normally distributed around a given mean, with a
    covariance constructed out of its eigenvalues and a rotation in configuration
    space. Currently, only two dimensional parameter spaces are supported.
    """

    # user configurable state
    parameters = altar.properties.int(default=2)
    parameters.doc = "the number of model degrees of freedom"

    support = altar.properties.array(default=(-1,1))
    support.doc = "the support interval of the prior distribution"

    prep = altar.distributions.distribution()
    prep.doc = "the distribution used to generate the initial sample"

    prior = altar.distributions.distribution()
    prior.doc = "the prior distribution"

    μ = altar.properties.array(default=(0,0))
    μ.doc = 'the location of the central value of the observation'

    λ = altar.properties.array(default=(.01, .005))
    λ.doc = 'the eigenvalues of the covariance matrix'

    φ = altar.properties.dimensional(default=0*altar.units.angle.rad)
    φ.doc = 'the orientation of the covariance semi-major axis'

    # protocol obligations
    @altar.export
    def initialize(self, application):
        """
        Initialize the state of the model given a {problem} specification
        """
        # chain up
        super().initialize(application=application)
        # get my random number generator
        rng = self.rng
        # initialize my distributions with it
        self.prep.initialize(rng=rng)
        self.prior.initialize(rng=rng)
        # all done
        return self

    @altar.export
    def initializeSample(self, step):
        """
        Fill {step.θ} with an initial random sample from my prior distribution.
        """
        # grab the portion of the sample that's mine
        θ = self.restrict(theta=step.theta)
        # fill it with random numbers from my initializer
        self.prep.initializeSample(theta=θ)
        # and return
        return self

    @altar.export
    def priorLikelihood(self, step):
        """
        Fill {step.prior} with the likelihoods of the samples in {step.theta} in the prior
        distribution
        """
        # grab my prior pdf
        pdf = self.prior
        # grab the portion of the sample that's mine
        θ = self.restrict(theta=step.theta)
        # and the storage for the prior likelihoods
        likelihood = step.prior
        # delegate the computation to the distribution
        pdf.priorLikelihood(theta=θ, likelihood=likelihood)
        # all done
        return self

    @altar.export
    def dataLikelihood(self, step):
        """
        Fill {step.data} with the likelihoods of the samples in {step.theta} given the available
        data. This is what is usually referred to as the "forward model"
        """
        # cache the inverse of {σ}
        σ_inv = self.σ_inv
        # grab the portion of the sample that's mine
        θ = self.restrict(theta=step.theta)
        # and the storage for the data likelihoods
        data = step.data
        # find out how many samples in the set
        samples = θ.rows
        # for each sample in the sample set
        for sample in range(samples):
            # prepare vector with the sample difference from the mean
            # NOTE: {getRow} hands back a fresh vector here, and {δ -= peak} mutates it
            δ = θ.getRow(sample)
            δ -= self.peak
            # storage for {σ_inv . δ}
            y = altar.vector(shape=δ.shape).zero()
            # compute {σ_inv . δ} and store it in {y}
            altar.blas.dsymv(σ_inv.upperTriangular, 1.0, σ_inv, δ, 0.0, y)
            # finally, form {δ^T . σ_inv . δ}
            v = altar.blas.ddot(δ, y)
            # accumulate the log-likelihood of the data given this sample
            data[sample] += self.normalization - v/2
        # all done
        return self

    @altar.export
    def verify(self, step, mask):
        """
        Check whether the samples in {step.theta} are consistent with the model requirements and
        update the {mask}, a vector with zeroes for valid samples and non-zero for invalid ones
        """
        # grab the portion of the sample that's mine
        θ = self.restrict(theta=step.theta)
        # grab my prior
        pdf = self.prior
        # ask it to verify my samples
        pdf.verify(theta=θ, mask=mask)
        # all done; return the rejection map
        return mask

    # meta methods
    def __init__(self, **kwds):
        # chain up
        super().__init__(**kwds)
        # local names for the math functions
        log, π, cos, sin = math.log, math.pi, math.cos, math.sin
        # the number of model parameters
        dof = self.parameters
        # convert the central value into a vector; allocate
        peak = altar.vector(shape=dof)
        # and populate
        for index, value in enumerate(self.μ): peak[index] = value
        # the trigonometry
        cos_φ = cos(self.φ)
        sin_φ = sin(self.φ)
        # the eigenvalues
        λ0 = self.λ[0]
        λ1 = self.λ[1]
        # the eigenvalue inverses
        λ0_inv = 1/λ0
        λ1_inv = 1/λ1
        # build the inverse of the covariance matrix
        # NOTE: only the 2x2 entries are populated, consistent with the class docstring's
        # restriction to two dimensional parameter spaces
        σ_inv = altar.matrix(shape=(dof, dof))
        σ_inv[0,0] = λ0_inv*cos_φ**2 + λ1_inv*sin_φ**2
        σ_inv[1,1] = λ1_inv*cos_φ**2 + λ0_inv*sin_φ**2
        σ_inv[0,1] = σ_inv[1,0] = (λ1_inv - λ0_inv) * cos_φ * sin_φ
        # compute the log of the covariance determinant (product of the eigenvalues)
        σ_lndet = log(λ0 * λ1)
        # attach the characteristics of my pdf
        self.peak = peak
        self.σ_inv = σ_inv
        # the log-normalization of the gaussian density
        self.normalization = -.5*(dof*log(2*π) + σ_lndet)
        # all done
        return

    # implementation details
    peak = None # the location of my central value
    σ_inv = None # the inverse of my data covariance
    normalization = 1 # the log-normalization of the data likelihood; recomputed in the constructor
# end of file
|
lijun99/altar | cuda/cuda/models/cudaParameterSet.py | <filename>cuda/cuda/models/cudaParameterSet.py<gh_stars>1-10
# -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
import altar.cuda
# the base
from altar.models.Contiguous import Contiguous
# component
class cudaParameterSet(Contiguous, family="altar.cuda.models.parameters.parameterset"):
    """
    A contiguous parameter set

    Owns {count} parameters occupying the columns [offset, offset+count) of the
    sample matrix {theta}; delegates sampling, prior evaluation, and
    verification to its distributions
    """

    # user configurable state
    count = altar.properties.int(default=1)
    count.doc = "the number of parameters in this set"

    prior = altar.cuda.distributions.distribution()
    prior.doc = "the prior distribution"

    prep = altar.cuda.distributions.distribution(default=None)
    prep.doc = "the distribution to use to initialize this parameter set"

    # parameter set offset in theta
    # determined by cudaBayesian.psets
    offset = 0

    def cuInitialize(self, application):
        """
        cuda initialization; returns my parameter count
        """
        # get my offset
        offset = self.offset
        # get my count
        count = self.count
        # adjust the number of parameters of my distributions
        self.prior.parameters = count
        self.prior.offset = offset
        # initialize my distributions
        self.prior.cuInitialize(application=application)
        # if a separate initializing distribution was configured
        if self.prep is not None:
            # size and initialize it as well
            self.prep.parameters = count
            self.prep.offset = offset
            self.prep.cuInitialize(application=application)
        # otherwise
        else:
            # fall back to the prior
            self.prep = self.prior
        # return my parameter count so the next set can be initialized properly
        return count

    def cuInitSample(self, theta, batch=None):
        """
        Fill {theta} with an initial random sample from my prior distribution.
        """
        # fill it with random numbers from my {prep} distribution
        # bug fix: was {theta=θ}, a reference to an undefined name
        self.prep.cuInitSample(theta=theta, batch=batch)
        # all done
        return self

    def cuEvalPrior(self, theta, prior, batch=None):
        """
        Fill {prior} with the log likelihoods of the samples in {theta} in my prior distribution
        """
        # delegate
        # bug fix: was {theta=θ}, a reference to an undefined name
        self.prior.cuEvalPrior(theta=theta, prior=prior, batch=batch)
        # all done
        return self

    @altar.export
    def cuVerify(self, theta, mask, batch=None):
        """
        Check whether the samples in {theta} are consistent with the model requirements and
        update the {mask}, a vector with zeroes for valid samples and non-zero for invalid ones
        """
        # ask my prior to verify my samples
        # bug fix: was {theta=θ}, a reference to an undefined name
        self.prior.cuVerify(theta=theta, mask=mask, batch=batch)
        # all done; return the rejection map
        return mask

    # implementation details
    def cuRestrict(self, theta):
        """
        Return my portion of the sample matrix {theta}
        """
        # find out how many samples in the set
        samples = theta.shape[0]
        # get my parameter count
        parameters = self.count
        # get my offset in the samples
        offset = self.offset
        # find where my samples live within the overall sample matrix:
        start = 0, offset
        # form the shape of the sample matrix that's mine
        shape = samples, parameters
        # return a view to the portion of the sample that's mine: i own data in all sample
        # rows, starting in the column indicated by my {offset}, and the width of my block is
        # determined by my parameter count
        return theta.submatrix(start=start, size=shape)
# end of file
|
lijun99/altar | altar/altar/data/DataObs.py | <reponame>lijun99/altar
# -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
# the protocol
class DataObs(altar.protocol, family="altar.data"):
    """
    The protocol that all AlTar data observation components must satisfy
    """

    # interface
    @altar.provides
    def initialize(self, application):
        """
        Initialize the data given an {application} context
        """

    # framework hooks
    @classmethod
    def pyre_default(cls, **kwds):
        """
        Provide a default implementation in case the user hasn't selected one
        """
        # the default is {L2}
        from .DataL2 import DataL2 as default
        # make it accessible
        return default
# end of file
|
lijun99/altar | altar/altar/simulations/Recorder.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# the package
import altar
# my protocol
from .Archiver import Archiver as archiver
# an implementation of the archiver protocol
class Recorder(altar.component, family="altar.simulations.archivers.recorder", implements=archiver):
    """
    An archiver that saves the final state of the simulation to text files
    """

    # user configurable state
    theta = altar.properties.path(default="theta.txt")
    theta.doc = "the path to the file with the final posterior sample"
    sigma = altar.properties.path(default="sigma.txt")
    sigma.doc = "the path to the file with the final parameter correlation matrix"
    llk = altar.properties.path(default="llk.txt")
    llk.doc = "the path to the file with the final posterior log likelihood"

    # protocol obligations
    @altar.export
    def initialize(self, application):
        """
        Initialize me given an {application} context
        """
        # nothing to do
        return self

    @altar.export
    def record(self, step, **kwds):
        """
        Record the final state of the calculation: the samples, the covariance
        matrix, and the posterior log likelihood
        """
        # pair each container in {step} with its destination file
        for payload, destination in ((step.theta, self.theta),
                                     (step.sigma, self.sigma),
                                     (step.posterior, self.llk)):
            # and save it
            payload.save(filename=destination)
        # all done
        return self
# end of file
|
lijun99/altar | cuda/cuda/norms/cudaL2.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
from altar.cuda import libcudaaltar
from altar.cuda import cublas
# my protocol
from altar.norms.L2 import L2
# declaration
class cudaL2(L2, family="altar.norms.cudal2"):
    """
    The L2 norm, computed on the GPU

    Both entry points share the same setup (batch clamping, output allocation,
    and optional whitening by the Cholesky factor of the inverse data
    covariance), which is factored into {_prepare}
    """

    # interface
    @altar.export
    def cuEval(self, data, out=None, batch=None, cdinv=None):
        """
        Compute the L2 norm of the given data ||x||
        Arguments:
            data - matrix (samples x observations); modified in place when {cdinv} is given
            batch - number of samples to be computed (first rows)
            cdinv - inverse covariance matrix (observations x observations) in its Cholesky decomposed form (Upper Triangle)
        Return:
            out - norm vector (samples)
        """
        # clamp the batch, allocate the output if needed, and whiten {data} in place
        out, batch = self._prepare(data=data, out=out, batch=batch, cdinv=cdinv)
        # compute the norm
        libcudaaltar.cudaL2_norm(data.data, out.data, batch)
        # return the result
        return out

    def cuEvalLikelihood(self, data, constant=0.0, out=None, batch=None, cdinv=None):
        """
        Compute the L2 norm data likelihood of the given data const - ||x||^2/2
        Arguments:
            data - matrix (samples x observations); modified in place when {cdinv} is given
            batch - number of samples to be computed (first rows)
            constant - normalization constant
            cdinv - inverse covariance matrix (observations x observations) in its Cholesky decomposed form (Upper Triangle)
        Return:
            out - data likelihood vector (samples)
        """
        # clamp the batch, allocate the output if needed, and whiten {data} in place
        out, batch = self._prepare(data=data, out=out, batch=batch, cdinv=cdinv)
        # compute the log-likelihood: constant - ||x||^2/2
        libcudaaltar.cudaL2_normllk(data.data, out.data, batch, constant)
        # return the result
        return out

    # implementation details
    def _prepare(self, data, out, batch, cdinv):
        """
        Shared setup for the norm kernels: clamp {batch} to the sample count,
        allocate {out} when missing, and apply {cdinv} to {data} in place
        """
        samples = data.shape[0]
        # no batch provided, compute all samples
        if batch is None or batch > samples:
            batch = samples
        # if output is not pre-allocated, allocate it
        if out is None:
            out = altar.cuda.vector(shape=samples)
        # if a covariance matrix is provided
        if cdinv is not None:
            # whiten in place: data[samples][obs] = data[samples][obs] x cdinv[obs][obs]
            cublas.trmm(cdinv, data, out=data,
                alpha=1.0, uplo=cublas.FillModeUpper, side=cublas.SideRight,
                transa = cublas.OpNoTrans, diag=cublas.DiagNonUnit)
        # hand back the normalized pieces
        return out, batch
# end of file
|
lijun99/altar | models/seismic/seismic/cuda/cudaStatic.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# This is a copy of cudaLinear
# If used in cascaded model, make sure its parameters are the contiguous parameters in the beginning
# Otherwise, modifications of the following code are needed
# An easy way is to use self.restricted method to extract own parameters from theta
# the package
import altar
import altar.cuda
from altar.cuda import cublas as cublas
from altar.cuda import libcuda
from altar.cuda.models.cudaBayesian import cudaBayesian
import numpy
# declaration
class cudaStatic(cudaBayesian, family="altar.models.seismic.cuda.static"):
    """
    cudaLinear with the new cuda framework

    A linear (static) inversion model: the data prediction is {green} x {theta},
    evaluated on the GPU through cublas
    """

    # data observations
    dataobs = altar.cuda.data.data()
    dataobs.default = altar.cuda.data.datal2()
    dataobs.doc = "the observed data"

    # the file based inputs
    green = altar.properties.path(default="static.gf.h5")
    green.doc = "the name of the file with the Green functions"

    # protocol obligations
    @altar.export
    def initialize(self, application):
        """
        Initialize the state of the model given a {problem} specification
        """
        # chain up
        super().initialize(application=application)
        # get a cublas handle
        self.cublas_handle = self.device.get_cublas_handle()
        # load green's function to CPU
        self.GF = self.loadFile(filename=self.green, shape=(self.observations, self.parameters))
        # make a gpu copy of Green's function
        self.gGF = altar.cuda.matrix(shape=self.GF.shape, dtype=self.precision)
        # prepare the residuals matrix
        self.gDataPred = altar.cuda.matrix(shape=(self.samples, self.observations),
            dtype=self.precision)
        # merge covariance to green's function; also copies {GF} to the device
        if not self.forwardonly:
            self.mergeCovarianceToGF()
        # all done
        return self

    def forwardModelBatched(self, theta, green, prediction, batch, observation=None):
        """
        Linear Forward Model prediction = G theta for the first {batch} samples;
        when {observation} is given, {prediction} holds residuals instead
        """
        # whether data observation is provided
        # gemm C = alpha A B + beta C
        if observation is None:
            beta = 0.0
        else:
            # make a copy
            prediction.copy(observation)
            # to be subtracted in gemm
            beta = -1.0
        # forward model
        # prediction = Green * theta
        # in c/python pred (samples, obs), green (obs, parameters), theta (samples, params)
        # cublas.gemm(A=theta, B=green, transa=0, transb=1,
        #    out=prediction,
        #    handle=self.cublas_handle,
        #    alpha=1.0, beta=beta,
        #    rows=batch)
        # use cublas interface directly, as in cascaded problem, only the first few parameters are used
        # in column major: translated to pred (obsxsamples) green(param obs) theta (params x samples)
        # we therefore use pred = G^T x theta
        libcuda.cublas_gemm(self.cublas_handle,
            1, 0, # transa, transb
            prediction.shape[1], batch, green.shape[1], # m, n, k
            1.0, # alpha
            green.data, green.shape[1], # A, lda
            theta.data, theta.shape[1], # B, ldb
            beta,
            prediction.data, prediction.shape[1])
        # all done
        return self

    def forwardModel(self, theta, green, prediction, observation=None):
        """
        Static/Linear forward model prediction = green * theta
        :param theta: a parameter set, vector with size parameters
        :param green: green's function, matrix with size (observations, parameters)
        :param prediction: data prediction, vector with size observations
        :return: data prediction if observation is none; otherwise return residual
        """
        if observation is None:
            beta = 0.0
        else:
            # make a copy
            prediction.copy(observation)
            # to be subtracted in gemm
            beta = -1.0
        # green (obs, params) theta (params) predic(obs)
        # cublas.gemv(handle=self.cublas_handle,
        #    A=green, trans=cublas.OpNoTrans, x=theta,
        #    out=prediction,
        #    alpha=1.0, beta=beta)
        # cublas uses column major, green is treated as (params, obs)
        libcuda.cublas_gemv(self.cublas_handle,
            1, # transa = transpose
            green.shape[1], green.shape[0], # m, n, or param, obs
            1.0, # alpha
            green.data, green.shape[1], # A, lda
            theta.data, 1, # x, incx
            beta, # beta
            prediction.data, 1 # y, incy
            )
        # all done
        return self

    def cuEvalLikelihood(self, theta, likelihood, batch):
        """
        Compute data likelihood from the forward model,
        :param theta: parameters, matrix [samples, parameters]
        :param likelihood: data likelihood P(d|theta), vector [samples]
        :param batch: the number of samples to be computed, batch <= samples
        :return: likelihood, in case of model ensembles, data likelihood of this model
            is added to the input likelihood
        """
        residuals = self.gDataPred
        # call forward to calculate the data prediction or its difference from dataobs
        self.forwardModelBatched(theta=theta, green=self.gGF,
            prediction=residuals, batch=batch,
            observation= self.dataobs.gdataObsBatch)
        # compute the data likelihood with l2 norm
        self.dataobs.cuEvalLikelihood(prediction=residuals,
            likelihood=likelihood,
            residual=True, batch=batch)
        # return the likelihood
        return likelihood

    def mergeCovarianceToGF(self):
        """
        merge data covariance (cd) with green function
        """
        # get references for data covariance
        cd_inv = self.dataobs.gcd_inv
        # get a reference for green's function
        green = self.gGF
        # copy from CPU
        green.copy_from_host(source=self.GF)
        # check whether cd is a constant or a matrix
        if isinstance(cd_inv, float):
            # scale the whole Green's function in place
            green *= cd_inv
        elif isinstance(cd_inv, altar.cuda.matrix):
            # (obsxobs) x (obsxparameters) = (obsxparameters), in place
            cublas.trmm(cd_inv, green, out=green, side=cublas.SideLeft,
                uplo=cublas.FillModeUpper,
                transa = cublas.OpNoTrans,
                diag=cublas.DiagNonUnit,
                alpha=1.0,
                handle = self.cublas_handle)
        # all done
        return

    @altar.export
    def forwardProblem(self, application, theta=None):
        """
        Perform the forward modeling with given {theta}; when {theta} is not
        provided, it is loaded from {theta_input}. The prediction is appended
        to the 'static.Data' dataset of {forward_output}
        """
        import h5py
        # get theta; NOTE: a falsy {theta} also triggers loading from file
        gtheta = theta or self.loadFileToGPU(filename=self.theta_input,
            dataset=self.theta_dataset)
        # allocate predicted data
        gData = altar.cuda.vector(shape=self.observations, dtype = self.precision)
        # get a reference for green's function
        gGF = self.gGF
        # copy from CPU; the raw GF is used here, without the covariance merge
        gGF.copy_from_host(source=self.GF)
        # forward model
        self.forwardModel(theta=gtheta, green=gGF, prediction=gData)
        # save data prediction
        h5file = h5py.File(name=self.forward_output.path, mode='a')
        # if already exists, del the old dataset
        if 'static.Data' in h5file.keys():
            del h5file['static.Data']
        h5file.create_dataset(name='static.Data', data=gData.copy_to_host(type='numpy'))
        h5file.close()
        # all done
        return

    # private data
    # inputs
    GF = None # the Green functions
    gGF = None # the GPU copy of the Green functions
    gDataPred = None # the (samples x observations) prediction/residual workspace
    cublas_handle=None # the cublas handle, obtained from the device at initialization
# end of file
|
lijun99/altar | models/sir/sir/SIR.py | <filename>models/sir/sir/SIR.py
# -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# author(s): <NAME>
# the package
import altar
from altar.models.BayesianL2 import BayesianL2
# declaration
class SIR(BayesianL2, family="altar.models.sir"):
    """
    SIR model in epidemiology

    The model carries five parameters:
        S0 - The initial value of susceptible population (times the population base factor)
        I0 - The initial value of infectious
        R0 - The initial value of Recovered/Deaths
        β - the average number of contacts per person per time
        γ - the rate of recovery or mortality
    from which the time sequences S(t), I(t), R(t) are generated. The data
    observations are the new cases per day, i.e. S(t-1)-S(t)
    """

    # configurable properties
    population_base = altar.properties.float(default=10000)
    population_base.doc = 'the base factor for population'

    def _SIR_Rate(self, S, I, N, β, γ):
        """
        Advance (S, I) by one step of the discrete SIR rate equations
        """
        # the newly infected and newly recovered counts over this step
        infected = β*I*S/N
        healed = γ*I
        # update the compartments
        return S - infected, I + infected - healed

    def forwardModel(self, theta, prediction):
        """
        Forward SIR model: fill {prediction} with the residuals between the
        simulated and the observed daily new cases
        """
        # unpack the parameters from theta
        S = theta[0]*self.population_base
        I = theta[1]
        R = theta[2]
        # the total population, constant throughout the simulation
        N = S + I + R
        β, γ = theta[3], theta[4]
        # the observed daily new cases
        obs = self.dataobs.dataobs
        # step the rate equations through every observed day
        for day in range(self.observations):
            # advance the compartments by one day
            Snew, Inew = self._SIR_Rate(S, I, N, β, γ)
            # residual: simulated daily new cases minus the observation
            prediction[day] = (S - Snew)-obs[day]
            # roll the state forward
            S, I = Snew, Inew
        # all done
        return self
# end of file
|
lijun99/altar | altar/altar/simulations/GSLRNG.py | <gh_stars>1-10
# -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# the package
import altar
# my protocol
from .RNG import RNG as rng
# the random number generator
class GSLRNG(altar.component, family="altar.simulations.rng.gsl", implements=rng):
    """
    A random number generator built on top of the {gsl} wrappers
    """

    # user configurable state
    seed = altar.properties.float(default=0)
    seed.doc = 'the number with which to seed the generator'
    algorithm = altar.properties.str(default='ranlxs2')
    algorithm.doc = 'the random number generator algorithm'

    # public data
    rng = None # the handle to the wrapper from the {gsl} package

    # required behavior
    @altar.export
    def initialize(self, **kwds):
        """
        Initialize the random number generator
        """
        # the constructor already built and seeded the generator; nothing further to do
        return self

    # meta-methods
    def __init__(self, **kwds):
        # chain up
        super().__init__(**kwds)
        # instantiate the generator with the configured algorithm
        generator = altar.rng(algorithm=self.algorithm)
        # seed it
        # NOTE(review): {seed} is declared as a float property; confirm the gsl
        # wrapper accepts non-integer seeds
        generator.seed(seed=self.seed)
        # and attach it
        self.rng = generator
        # all done
        return

    # implementation details
    def show(self):
        """
        Display some information about me
        """
        # get the journal
        import journal
        # make a debug channel
        channel = journal.debug("altar.init")
        # describe my configuration
        channel.line(f"{self.pyre_name}:")
        channel.line(f" seed: {self.seed}")
        channel.line(f" algorithm: {self.algorithm}")
        channel.log()
        # all done
        return self
# end of file
|
lijun99/altar | cuda/cuda/distributions/__init__.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
import altar.cuda
# use the cpu protocol
from altar.distributions.Distribution import Distribution as distribution
# get default
from .cudaDistribution import cudaDistribution as cudaDistribution
@altar.foundry(implements=distribution, tip="the cuda cudaUniform probability distribution")
def uniform():
    # pull in the component class
    from .cudaUniform import cudaUniform
    # adopt its docstring
    __doc__ = cudaUniform.__doc__
    # and publish the factory
    return cudaUniform
@altar.foundry(implements=distribution, tip="the cuda gaussian probability distribution")
def gaussian():
    """
    Foundry for the cuda gaussian probability distribution component
    """
    # pull in the implementation
    from .cudaGaussian import cudaGaussian as factory
    # adopt its docstring
    __doc__ = factory.__doc__
    # and publish it
    return factory
@altar.foundry(implements=distribution, tip="the cuda truncated gaussian probability distribution")
def tgaussian():
    """
    Foundry for the cuda truncated gaussian probability distribution component
    """
    # pull in the implementation
    from .cudaTGaussian import cudaTGaussian as factory
    # adopt its docstring
    __doc__ = factory.__doc__
    # and publish it
    return factory
@altar.foundry(implements=distribution, tip="the preset distribution")
def preset():
    """
    Foundry for the preset distribution component
    """
    # pull in the implementation
    from .cudaPreset import cudaPreset as factory
    # adopt its docstring
    __doc__ = factory.__doc__
    # and publish it
    return factory
# end of file
|
lijun99/altar | altar/altar/models/Ensemble.py | <gh_stars>1-10
# -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# the package
import altar
# my superclass
from .Bayesian import Bayesian
# declaration
class Ensemble(Bayesian, family="altar.models.ensemble"):
    """
    A collection of AlTar models that comprise a single model

    Each protocol obligation is satisfied by delegating to every member of the collection,
    so the members jointly fill the likelihood vectors and the rejection mask.
    """

    # my collection
    # FIX: the schema used to be the bare, undefined name {model()}; spell out the
    # model protocol foundry from the {altar} package
    models = altar.properties.list(schema=altar.models.model())
    models.doc = "the collection of models in this ensemble"

    # protocol obligations
    @altar.export
    def initialize(self, application):
        """
        Initialize the state of the model given an {application} context
        """
        # chain up
        super().initialize(application=application)
        # go through my models
        for model in self.models:
            # and initialize each one
            model.initialize(application=application)
        # all done
        return self

    # services
    @altar.export
    def initializeSample(self, step):
        """
        Fill {step.theta} with an initial random sample from my prior distribution.
        """
        # ask each of my models
        for model in self.models:
            # to initialize their portion of the samples in {step}
            model.initializeSample(step=step)
        # all done
        return self

    @altar.export
    def priorLikelihood(self, step):
        """
        Fill {step.prior} with the likelihoods of the samples in {step.theta} in the prior
        distribution
        """
        # ask each of my models
        for model in self.models:
            # to contribute to the computation of the prior likelihood
            model.priorLikelihood(step=step)
        # all done
        return self

    @altar.export
    def dataLikelihood(self, step):
        """
        Fill {step.data} with the likelihoods of the samples in {step.theta} given the available
        data. This is what is usually referred to as the "forward model"
        """
        # ask each of my models
        for model in self.models:
            # to contribute to the computation of the data likelihood
            # BUG FIX: this used to call {model.priorLikelihood}, which recomputed the prior
            # instead of evaluating the forward model
            model.dataLikelihood(step=step)
        # all done
        return self

    @altar.export
    def verify(self, step, mask):
        """
        Check whether the samples in {step.theta} are consistent with the model requirements and
        update the {mask}, a vector with zeroes for valid samples and non-zero for invalid ones
        """
        # ask each of my models
        for model in self.models:
            # to verify their portion of the samples in {step}
            model.verify(step=step, mask=mask)
        # all done; return the updated rejection map
        return mask
# end of file
|
lijun99/altar | models/regression/regression/Linear.py | <filename>models/regression/regression/Linear.py
# -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# author(s): <NAME>
# the package
import altar
from altar.models.BayesianL2 import BayesianL2
# declaration
class Linear(BayesianL2, family="altar.models.regression.linear"):
    """
    Linear regression model: y = a*x + b

    The forward model reports the residual between the prediction and the observations.
    """

    # additional model properties
    x_file = altar.properties.path(default='x.txt')
    x_file.doc = "the input file for x variable"

    @altar.export
    def initialize(self, application):
        """
        Initialize the state of the model
        """
        # let the superclass wire up the data, parameter sets, and input filesystem
        super().initialize(application=application)
        # pull the independent variable samples from the input file
        self.x = self.loadFile(self.x_file)
        # and grab the observations from the data component
        self.y = self.dataobs.dataobs
        # configure the forward model to hand back residuals rather than raw predictions
        self.return_residual = True
        # all done, return self
        return self

    def forwardModel(self, theta, prediction):
        """
        Forward Model
        :param theta: sampling parameters for one sample
        :param prediction: data prediction or residual (prediction - observation)
        :return: none
        """
        # unpack the model parameters for this sample
        a, b = theta[0], theta[1]
        # bind the data to locals
        x, y = self.x, self.y
        # fill {prediction} with the residual at every observation point
        for i in range(self.observations):
            prediction[i] = a * x[i] + b - y[i]
        # all done
        return self

    # private variables
    x = None
    y = None
# end of file
|
lijun99/altar | cuda/cuda/models/cudaBayesianEnsemble.py | <gh_stars>1-10
# -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
import altar.cuda
# my superclass
from altar.models.Bayesian import Bayesian
# other
import numpy
# declaration
class cudaBayesianEnsemble(Bayesian, family="altar.models.cudaensemble"):
    """
    A collection of AlTar models that comprise a single model
    """

    # my collection
    models = altar.properties.dict(schema=altar.cuda.models.model())
    models.doc = "the collection of models in this ensemble"

    parameters = altar.properties.int(default=1)
    parameters.doc = "the number of model degrees of freedom"

    psets_list = altar.properties.list(default=None)
    psets_list.doc = "list of parameter sets, used to set orders"

    psets = altar.properties.dict(schema=altar.cuda.models.parameters())
    psets.doc = "an ensemble of parameter sets in the model"

    # the path of input files
    case = altar.properties.path(default="input")
    case.doc = "the directory with the input files"

    # options for performing forward model only
    forwardonly = altar.properties.bool(default=False)
    forwardonly.doc = "whether to run the simulation or the forward problem only"

    # input theta (one sample)
    theta_input = altar.properties.path(default="theta.h5")
    theta_input.doc = "the theta input file with a vector of parameters"

    theta_dataset = altar.properties.str(default=None)
    theta_dataset.doc = "the name/path of the theta dataset in h5 file"

    forward_output = altar.properties.path(default="forward_prediction.h5")
    forward_output.doc = "the name/path of the file to save forward problem results"

    # protocol obligations
    @altar.export
    def initialize(self, application):
        """
        Initialize the state of the model given an {application} context
        """
        # chain up
        super().initialize(application=application)
        # mount my input data space
        self.ifs = self.mountInputDataspace(pfs=application.pfs)
        # find out how many samples to work with; equal to the number of chains
        self.samples = application.job.chains
        # cuda method: grab the device and the requested gpu precision
        self.device = application.controller.worker.device
        self.precision = application.job.gpuprecision

        # initialize the parameter sets; start the running offset at zero
        parameters = 0
        # go through my parameter sets in the user-prescribed order
        for name in self.psets_list:
            # get the parameter set from psets dictionary
            pset = self.psets[name]
            # set the offset
            pset.offset = parameters
            # initialize the pset and accumulate its parameter count
            parameters += pset.cuInitialize(application=application)
        # the total number of parameters is now known
        self.parameters = parameters

        # go through my models
        for name, model in self.models.items():
            # mark child models as embedded so they restrict themselves to their own slice
            model.embedded = True
            # propagate the forward-only flag
            model.forwardonly = self.forwardonly
            # and initialize each one
            model.initialize(application=application)

        # model-specific gpu initialization
        self.cuInitialize(application=application)
        # scratch vector used to accumulate each child model's data likelihood
        self.datallk = altar.cuda.vector(shape=self.samples, dtype=self.precision)
        # all done
        return self

    def cuInitialize(self, application):
        """
        cuda initialization; hook for subclasses, nothing to do here
        """
        return self

    @altar.export
    def posterior(self, application):
        """
        Sample my posterior distribution
        """
        # ask my controller to help me sample my posterior distribution
        return self.controller.posterior(model=self)

    def cuInitSample(self, theta):
        """
        Fill {theta} with an initial random sample from my prior distribution.
        """
        # ask my subsets
        for name, pset in self.psets.items():
            # and ask each one to draw its portion of the sample
            pset.prep.cuInitSample(theta=theta)
        # all done
        return self

    def cuVerify(self, theta, mask):
        """
        Check whether the samples in {theta} are consistent with the model requirements and
        update the {mask}, a vector with zeroes for valid samples and non-zero for invalid ones
        """
        # ask my subsets
        for pset in self.psets.values():
            # and ask each one to verify the sample
            pset.prior.cuVerify(theta=theta, mask=mask)
        # all done; return the rejection map
        return mask

    def cuEvalPrior(self, theta, prior, batch):
        """
        Fill {prior} with the log likelihoods of the samples in {theta} in my prior distribution
        """
        # default to the full set of samples
        batch = batch if batch is not None else theta.rows
        # ask my subsets
        for pset in self.psets.values():
            # and ask each one to contribute its prior log-likelihood
            pset.prior.cuEvalPrior(theta=theta, prior=prior, batch=batch)
        # all done
        return self

    def cuEvalLikelihood(self, step, batch):
        """
        Fill {step.data} with the likelihoods of the samples in {step.theta} given the available
        data. This is what is usually referred to as the "forward model"
        """
        # grab my scratch accumulator
        datallk = self.datallk
        # ask each of my models
        for name, model in self.models.items():
            # to contribute to the computation of the data likelihood
            # each model needs to decide how it takes the whole parameter set from an ensemble
            # one option is to make a local copy of theta if needed
            #   model_theta = model.restricted(theta=step.theta, batch=batch)
            # another is to use idx_map
            model.cuEvalLikelihood(theta=step.theta, likelihood=datallk.zero(), batch=batch)
            # cascaded models fold their contribution into the prior, others into the data term
            if model.cascaded:
                step.prior += datallk
            else:
                step.data += datallk
        # all done
        return self

    def cuEvalPosterior(self, step, batch):
        """
        Given the {step.prior} and {step.data} likelihoods, compute a generalized posterior using
        {step.beta} and deposit the result in {step.posterior}
        """
        # prime the posterior with the prior
        step.posterior.copy(step.prior)
        # compute it; this expression reduces to Bayes' theorem for β->1
        altar.cuda.cublas.axpy(alpha=step.beta, x=step.data, y=step.posterior, batch=batch)
        # all done
        return self

    def updateModel(self, annealer):
        """
        Update model parameters if needed
        :param annealer: the controller
        :return: True if any member model updated itself
        """
        # default is not updated
        out = False
        # iterate over embedded models
        for name, model in self.models.items():
            updated = model.updateModel(annealer=annealer)
            out = out or updated
        # all done
        return out

    @altar.export
    def likelihoods(self, annealer, step, batch):
        """
        Convenience function that computes all three likelihoods at once given the current {step}
        of the problem
        """
        # default to the full set of samples
        batch = step.samples if batch is None else batch
        # grab the dispatcher
        dispatcher = annealer.dispatcher
        # notify we are about to compute the prior likelihood
        dispatcher.notify(event=dispatcher.priorStart, controller=annealer)
        # compute the prior likelihood
        self.cuEvalPrior(theta=step.theta, prior=step.prior, batch=batch)
        # done
        dispatcher.notify(event=dispatcher.priorFinish, controller=annealer)
        # notify we are about to compute the likelihood of the prior given the data
        dispatcher.notify(event=dispatcher.dataStart, controller=annealer)
        # compute it
        self.cuEvalLikelihood(step=step, batch=batch)
        # done
        dispatcher.notify(event=dispatcher.dataFinish, controller=annealer)
        # finally, notify we are about to put together the posterior at this temperature
        dispatcher.notify(event=dispatcher.posteriorStart, controller=annealer)
        # compute it
        self.cuEvalPosterior(step=step, batch=batch)
        # done
        dispatcher.notify(event=dispatcher.posteriorFinish, controller=annealer)
        # enable chaining
        return self

    @altar.export
    def verify(self, step, mask):
        """
        Check whether the samples in {step.theta} are consistent with the model requirements and
        update the {mask}, a vector with zeroes for valid samples and non-zero for invalid ones
        """
        # delegate to the gpu implementation
        # BUG FIX: {cuVerify} takes {theta} and {mask}; the old call passed the whole {step}
        # plus a non-existent {batch} keyword and read {step.shape}, which cooling steps don't
        # have -- it would have raised at the first call
        self.cuVerify(theta=step.theta, mask=mask)
        # return the updated rejection map, consistent with the other model implementations
        return mask

    # implementation details
    def mountInputDataspace(self, pfs):
        """
        Mount the directory with my input files
        """
        # attempt to
        try:
            # mount the directory with my input data
            ifs = altar.filesystem.local(root=self.case)
        # if it fails
        except altar.filesystem.MountPointError as error:
            # grab my error channel
            channel = self.error
            # complain
            channel.log(f"bad case name: '{self.case}'")
            channel.log(str(error))
            # and bail
            raise SystemExit(1)
        # if all goes well, explore it and mount it
        pfs["inputs"] = ifs.discover()
        # all done
        return ifs

    def loadFile(self, filename, shape=None, dataset=None, dtype=None):
        """
        Load an input file to a numpy array (for both float32/64 support)
        Supported format:
          1. text file in '.txt' suffix, stored in prescribed shape
          2. binary file with '.bin' or '.dat' suffix,
             the precision must be same as the desired gpuprecision,
             and users must specify the shape of the data
          3. (preferred) hdf5 file in '.h5' suffix
             the metadata of shape, precision is included in .h5 file
        :param filename: str, the input file name
        :param shape: list of int
        :param dataset: str, name/key of dataset for h5 input only
        :param dtype: desired dtype; defaults to the configured gpu precision
        :return: output numpy.array
        """
        # decide the data type of the loaded vector/matrix
        dtype = dtype or self.precision
        # grab the input filesystem and the error channel
        ifs = self.ifs
        channel = self.error
        try:
            # get the path to the file
            file = ifs[filename]
        # BUG FIX: the clause used to read {except not ifs.NotFoundError}, which evaluates a
        # boolean instead of naming the exception class and therefore never matched
        except ifs.NotFoundError:
            # complain, mentioning the missing file by name
            channel.log(f"no file '{filename}' found in '{ifs.path()}'")
            # and re-raise so the caller sees the failure
            raise
        else:
            # get the suffix to determine type
            suffix = file.uri.suffix
            # use .txt for non-binary input
            if suffix == '.txt':
                # load to a cpu array
                cpuData = numpy.loadtxt(file.uri.path, dtype=dtype)
            # binary data
            elif suffix == '.bin' or suffix == '.dat':
                # binary blobs carry no metadata, so the caller must supply the shape
                # BUG FIX: this check used to consult an undefined name {out}
                if shape is None:
                    # complain
                    channel.log(f"must specify shape for binary input '{filename}'")
                    # and bail with a real exception; {channel.log} returns None
                    raise ValueError(f"must specify shape for binary input '{filename}'")
                # read; users need to check the precision matches the file contents
                cpuData = numpy.fromfile(file.uri.path, dtype=dtype)
            # hdf5 file
            elif suffix == '.h5':
                # get support
                import h5py
                # open
                h5file = h5py.File(file.uri.path, 'r')
                # get the desired dataset
                if dataset is None:
                    # if not provided, assume the only or first dataset as default
                    dataset = list(h5file.keys())[0]
                cpuData = numpy.asarray(h5file.get(dataset), dtype=dtype)
                h5file.close()
            # reshape when a shape was prescribed
            if shape is not None:
                cpuData = cpuData.reshape(shape)
        # all done
        return cpuData

    def loadFileToGPU(self, filename, shape=None, dataset=None, out=None, dtype=None):
        """
        Load an input file to a gpu (for both float32/64 support)
        Supported format:
          1. text file in '.txt' suffix, stored in prescribed shape
          2. binary file with '.bin' or '.dat' suffix,
             the precision must be same as the desired gpuprecision,
             and users must specify the shape of the data
          3. (preferred) hdf5 file in '.h5' suffix
             the metadata of shape, precision is included in .h5 file
        :param filename: str, the input file name
        :param shape: list of int
        :param dataset: str, name/key of dataset for h5 input only
        :param out: pre-allocated altar.cuda.matrix/vector to fill, or None to allocate
        :param dtype: desired dtype; defaults to the configured gpu precision
        :return: out altar.cuda.matrix/vector
        """
        # decide the data type
        dtype = dtype or self.precision
        # load to cpu as a numpy array at first
        cpuData = self.loadFile(filename=filename, shape=shape, dataset=dataset, dtype=dtype)
        # if output gpu matrix/vector is not pre-allocated
        if out is None:
            # if vector
            if cpuData.ndim == 1:
                out = altar.cuda.vector(shape=cpuData.shape[0], dtype=dtype)
            # if matrix
            elif cpuData.ndim == 2:
                out = altar.cuda.matrix(shape=cpuData.shape, dtype=dtype)
            else:
                # grab the error channel and complain
                channel = self.error
                channel.log(f"unsupported data dimension {cpuData.shape}")
                # BUG FIX: {raise channel.log(...)} raised None, which is itself a TypeError;
                # raise a proper exception instead
                raise ValueError(f"unsupported data dimension {cpuData.shape}")
        # move the data over to the device
        out.copy_from_host(source=cpuData)
        # all done
        return out

    @altar.export
    def forwardProblem(self, application, theta=None):
        """
        Perform the forward modeling with given {theta}
        """
        # only for one set of parameters; load it from the input file when not provided
        theta = theta or self.loadFileToGPU(filename=self.theta_input,
                                            dataset=self.theta_dataset)
        for name, model in self.models.items():
            # to contribute to the computation of the data likelihood
            # use the ensemble output path if not provided for each model
            # model needs to decide how to treat the whole parameter set
            model.forward_output = self.forward_output
            model.forwardProblem(application=application, theta=theta)
        # all done
        return

    # local
    datallk = None
# end of file
|
lijun99/altar | altar/altar/bayesian/Metropolis.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# externals
import math
# the package
import altar
# my protocol
from .Sampler import Sampler as sampler
# declaration
class Metropolis(altar.component, family="altar.samplers.metropolis", implements=sampler):
    """
    The Metropolis algorithm as a sampler of the posterior distribution
    """

    # types
    from .CoolingStep import CoolingStep

    # user configurable state
    scaling = altar.properties.float(default=.1)
    scaling.doc = 'the parameter covariance Σ is scaled by the square of this'

    acceptanceWeight = altar.properties.float(default=8.0/9.0)
    acceptanceWeight.doc = 'the weight of accepted samples during covariance rescaling'

    rejectionWeight = altar.properties.float(default=1.0/9.0)
    rejectionWeight.doc = 'the weight of rejected samples during covariance rescaling'

    # protocol obligations
    @altar.export
    def initialize(self, application):
        """
        Initialize me and my parts given an {application} context
        """
        # pull the chain length from the job specification
        self.steps = application.job.steps
        # get the capsule of the random number generator
        rng = application.rng.rng
        # set up the distribution for building the sample multiplicities; use a strictly
        # positive distribution to avoid generating candidates with zero displacement
        self.uniform = altar.pdf.uniform_pos(rng=rng)
        # set up the distribution for the random walk displacement vectors
        self.uninormal = altar.pdf.ugaussian(rng=rng)
        # all done
        return self

    @altar.export
    def samplePosterior(self, annealer, step):
        """
        Sample the posterior distribution
        """
        # grab the dispatcher
        dispatcher = annealer.dispatcher
        # notify we have started sampling the posterior
        dispatcher.notify(event=dispatcher.samplePosteriorStart, controller=annealer)
        # prepare the sampling pdf
        self.prepareSamplingPDF(annealer=annealer, step=step)
        # walk the chains
        statistics = self.walkChains(annealer=annealer, step=step)
        # notify we are done sampling the posterior
        dispatcher.notify(event=dispatcher.samplePosteriorFinish, controller=annealer)
        # all done
        return statistics

    @altar.provides
    def resample(self, annealer, statistics):
        """
        Update my statistics based on the results of walking my Markov chains
        """
        # update the scaling of the parameter covariance matrix
        self.adjustCovarianceScaling(*statistics)
        # all done
        return

    # implementation details
    def prepareSamplingPDF(self, annealer, step):
        """
        Re-scale and decompose the parameter covariance matrix, in preparation for the
        Metropolis update
        """
        # get the dispatcher
        dispatcher = annealer.dispatcher
        # notify we have started preparing the sampling PDF
        dispatcher.notify(event=dispatcher.prepareSamplingPDFStart, controller=annealer)
        # clone the parameter covariance so the original is untouched
        Σ = step.sigma.clone()
        # scale it
        Σ *= self.scaling**2
        # compute its Cholesky decomposition
        self.sigma_chol = altar.lapack.cholesky_decomposition(Σ)
        # notify we are done preparing the sampling PDF
        dispatcher.notify(event=dispatcher.prepareSamplingPDFFinish, controller=annealer)
        # all done
        return

    def walkChains(self, annealer, step):
        """
        Run the Metropolis algorithm on the Markov chains

        Returns the (accepted, rejected, unlikely) counters
        """
        # get the model
        model = annealer.model
        # and the event dispatcher
        dispatcher = annealer.dispatcher
        # unpack what i need from the cooling step
        β = step.beta
        θ = step.theta
        prior = step.prior
        data = step.data
        posterior = step.posterior
        # get the parameter covariance
        Σ_chol = self.sigma_chol
        # the sample geometry
        samples = step.samples
        parameters = step.parameters
        # cache the log function from the math module; (the unused {exp} cache was removed)
        log = math.log
        # reset the accept/reject counters
        accepted = rejected = unlikely = 0

        # allocate some vectors that we use throughout the following
        # candidate likelihoods
        cprior = altar.vector(shape=samples)
        cdata = altar.vector(shape=samples)
        cpost = altar.vector(shape=samples)
        # a fake covariance matrix for the candidate steps, just so we don't have to rebuild it
        # every time
        csigma = altar.matrix(shape=(parameters,parameters))
        # the mask of samples rejected due to model constraint violations
        rejects = altar.vector(shape=samples)
        # and a vector with random numbers for the Metropolis acceptance
        dice = altar.vector(shape=samples)

        # advance all chains together
        # BUG FIX: the loop variable used to be {step}, shadowing the cooling step argument
        # that was unpacked above
        for link in range(self.steps):
            # notify we are advancing the chains
            dispatcher.notify(event=dispatcher.chainAdvanceStart, controller=annealer)
            # initialize the candidate sample by randomly displacing the current one
            cθ = self.displace(sample=θ)
            # initialize the likelihoods
            likelihoods = cprior.zero(), cdata.zero(), cpost.zero()
            # and the covariance matrix
            csigma.zero()
            # build a candidate state
            candidate = self.CoolingStep(beta=β, theta=cθ,
                                         likelihoods=likelihoods, sigma=csigma)

            # the random displacement may have generated candidates that are outside the
            # support of the model, so we must give it an opportunity to reject them;
            # notify we are starting the verification process
            dispatcher.notify(event=dispatcher.verifyStart, controller=annealer)
            # reset the mask and ask the model to verify the sample validity
            model.verify(step=candidate, mask=rejects.zero())
            # make the candidate a consistent set by replacing the rejected samples with copies
            # of the originals from {θ}
            for index, flag in enumerate(rejects):
                # if this sample was rejected
                if flag:
                    # copy the corresponding row from {θ} into {candidate}
                    cθ.setRow(index, θ.getRow(index))
            # notify that the verification process is finished
            dispatcher.notify(event=dispatcher.verifyFinish, controller=annealer)

            # compute the likelihoods
            model.likelihoods(annealer=annealer, step=candidate)
            # build a vector to hold the difference of the two posterior likelihoods
            diff = cpost.clone()
            # subtract the previous posterior
            diff -= posterior
            # randomize the Metropolis acceptance vector
            dice.random(self.uniform)

            # notify we are starting accepting samples
            dispatcher.notify(event=dispatcher.acceptStart, controller=annealer)
            # accept/reject: go through all the samples
            for sample in range(samples):
                # a candidate is rejected if the model considered it invalid
                if rejects[sample]:
                    # nothing to do: θ, priorL, dataL, and postL contain the right statistics
                    # for this sample; just update the rejection count
                    rejected += 1
                    # and move on
                    continue
                # a candidate is also rejected if the model considered it less likely than the
                # original and it wasn't saved by the {dice}
                if log(dice[sample]) > diff[sample]:
                    # nothing to do: θ, priorL, dataL, and postL contain the right statistics
                    # for this sample; just update the unlikely count
                    unlikely += 1
                    # and move on
                    continue
                # otherwise, update the acceptance count
                accepted += 1
                # copy the candidate sample
                θ.setRow(sample, cθ.getRow(sample))
                # and its likelihoods
                prior[sample] = cprior[sample]
                data[sample] = cdata[sample]
                posterior[sample] = cpost[sample]
            # notify we are done accepting samples
            dispatcher.notify(event=dispatcher.acceptFinish, controller=annealer)
            # notify we are done advancing the chains
            dispatcher.notify(event=dispatcher.chainAdvanceFinish, controller=annealer)

        # all done
        return accepted, rejected, unlikely

    def displace(self, sample):
        """
        Construct a set of displacement vectors for the random walk from a distribution with zero
        mean and my covariance
        """
        # get my decomposed covariance
        Σ_chol = self.sigma_chol
        # build a set of random displacement vectors; note that, for convenience, this starts
        # out as (parameters x samples), i.e. the transpose of what we need
        δT = altar.matrix(shape=tuple(reversed(sample.shape))).random(pdf=self.uninormal)
        # multiply the displacement vectors by the decomposed covariance
        δT = altar.blas.dtrmm(
            Σ_chol.sideLeft, Σ_chol.lowerTriangular, Σ_chol.opNoTrans, Σ_chol.nonUnitDiagonal,
            1, Σ_chol, δT)
        # allocate the transpose
        δ = altar.matrix(shape=sample.shape)
        # fill it
        δT.transpose(δ)
        # offset it by the original sample
        δ += sample
        # and return it
        return δ

    def adjustCovarianceScaling(self, accepted, rejected, unlikely):
        """
        Compute a new value for the covariance scaling factor based on the acceptance/rejection
        ratio
        """
        # unpack my weights
        aw = self.acceptanceWeight
        rw = self.rejectionWeight
        # compute the acceptance ratio
        acceptance = accepted / (accepted + rejected + unlikely)
        # the fudge factor
        kc = aw*acceptance + rw
        # don't let it get too small
        if kc < .01: kc = .01
        # or too big
        if kc > 1.: kc = 1.
        # store it
        self.scaling = kc
        # and return
        return self

    # private data
    steps = 1         # the length of each Markov chain
    uniform = None    # the distribution of the sample multiplicities
    uninormal = None  # the distribution of random walk displacement vectors
    sigma_chol = None # placeholder for the scaled and decomposed parameter covariance matrix
    dispatcher = None # a reference to the event dispatcher
# end of file
|
lijun99/altar | cuda/cuda/bayesian/__init__.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
# and the protocols
from altar.bayesian.Controller import Controller as controller
from altar.bayesian.Sampler import Sampler as sampler
from altar.bayesian.Scheduler import Scheduler as scheduler
# implementations
@altar.foundry(implements=sampler, tip="the Metropolis algorithm as a Bayesian sampler")
def metropolis():
    """
    Foundry for the cuda Metropolis sampler component
    """
    # pull in the implementation
    from .cudaMetropolis import cudaMetropolis as factory
    # adopt its docstring
    __doc__ = factory.__doc__
    # and publish it
    return factory
# implementations
@altar.foundry(implements=sampler, tip="the Metropolis sampler with targeted correlation")
def metropolisvaryingsteps():
    """
    Foundry for the cuda Metropolis sampler with a varying number of chain steps
    """
    # grab the factory
    # BUG FIX: the import used to be aliased to {metropolisvarysteps} (missing 'ing'), so the
    # subsequent {return metropolisvaryingsteps} resolved to this foundry function itself
    # instead of the component class
    from .cudaMetropolisVaryingSteps import cudaMetropolisVaryingSteps as metropolisvaryingsteps
    # attach its docstring
    __doc__ = metropolisvaryingsteps.__doc__
    # and return it
    return metropolisvaryingsteps
# implementations
@altar.foundry(implements=sampler, tip="the Metropolis sampler with a targeted acceptance rate")
def adaptivemetropolis():
    """
    Foundry for the cuda adaptive Metropolis sampler component
    """
    # pull in the implementation
    from .cudaAdaptiveMetropolis import cudaAdaptiveMetropolis as factory
    # adopt its docstring
    __doc__ = factory.__doc__
    # and publish it
    return factory
# end of file
|
lijun99/altar | models/cdm/cdm/Source.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# framework
import altar
# externals
import numpy
from math import sqrt, pi as π
# library
from .libcdm import CDM
# declaration
class Source:
    """
    The source response for a Compound Dislocation Model in an elastic half space.
    """

    # public data
    # location
    x = 0
    y = 0
    d = 0
    # semi-axes of the CDM along the X, Y, and Z axes (before applying the rotation)
    ax = 0
    ay = 0
    az = 0
    # clockwise rotation angles about X, Y, Z axes that define the orientation of the CDM
    omegaX = 0
    omegaY = 0
    omegaZ = 0
    # opening (tensile component of the Burgers vector) of the rectangular dislocation
    opening = 0
    # material properties
    v = .25 # Poisson ratio

    # interface
    def displacements(self, locations, los):
        """
        Compute the expected displacements at a set of observation locations from a compound
        (triaxial) dislocation source at depth.
        """
        # flatten the (x,y) observation points into the arrays that {CDM} expects
        count = len(locations)
        Xf = numpy.fromiter((pt[0] for pt in locations), dtype=float, count=count)
        Yf = numpy.fromiter((pt[1] for pt in locations), dtype=float, count=count)
        # compute the three surface displacement components at every observation point
        ue, un, uv = CDM(X=Xf, Y=Yf, X0=self.x, Y0=self.y, depth=self.d,
                         ax=self.ax, ay=self.ay, az=self.az,
                         omegaX=self.omegaX, omegaY=self.omegaY, omegaZ=self.omegaZ,
                         opening=self.opening, nu=self.v)
        # allocate space for the result
        u = altar.vector(shape=count)
        # project each displacement vector onto the line of sight and store it
        for idx in range(count):
            u[idx] = ue[idx]*los[idx,0] + un[idx]*los[idx,1] + uv[idx]*los[idx,2]
        # all done
        return u

    # meta-methods
    def __init__(self, x=x, y=y, d=d,
                 ax=ax, ay=ay, az=az, omegaX=omegaX, omegaY=omegaY, omegaZ=omegaZ,
                 opening=opening, v=v, **kwds):
        # chain up
        super().__init__(**kwds)
        # record the source location
        self.x, self.y, self.d = x, y, d
        # the semi-axes
        self.ax, self.ay, self.az = ax, ay, az
        # the rotation angles
        self.omegaX, self.omegaY, self.omegaZ = omegaX, omegaY, omegaZ
        # the opening
        self.opening = opening
        # and the Poisson ratio
        self.v = v
        # the strength
        self.dV = 4*(ax*ay + ax*az + ay*az) * opening
        # all done
        return
# end of file
|
lijun99/altar | altar/altar/models/BayesianL2.py | <reponame>lijun99/altar
# -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
# my protocol
from .Bayesian import Bayesian
# other
import numpy
# declaration
class BayesianL2(Bayesian, family="altar.models.bayesianl2"):
"""
A (Simplified) Bayesian Model with ParameterSets and L2 data norm
"""
# user configurable state
parameters = altar.properties.int(default=1)
parameters.doc = "the number of model degrees of freedom"
cascaded = altar.properties.bool(default=False)
cascaded.doc = "whether the model is cascaded (annealing temperature is fixed at 1)"
embedded = altar.properties.bool(default=False)
embedded.doc = "whether the model is embedded in an ensemble of models"
psets_list = altar.properties.list(default=None)
psets_list.doc = "list of parameter sets, used to set orders"
psets = altar.properties.dict(schema=altar.models.parameters())
psets.default = dict() # empty
psets.doc = "an ensemble of parameter sets in the model"
dataobs = altar.data.data()
dataobs.default = altar.data.datal2()
dataobs.doc = "observed data"
# the path of input files
case = altar.properties.path(default="input")
case.doc = "the directory with the input files"
idx_map=altar.properties.list(schema=altar.properties.int())
idx_map.default = None
idx_map.doc = "the indices for model parameters in whole theta set"
return_residual = altar.properties.bool(default=True)
return_residual.doc = "the forward model returns residual(True) or prediction(False)"
# protocol obligations
@altar.export
def initialize(self, application):
"""
Initialize the state of the model given an {application} context
"""
# super class method
super().initialize(application=application)
# mount my input data space
self.ifs = self.mountInputDataspace(pfs=application.pfs)
# find out how many samples I will be working with; this equal to the number of chains
self.samples = application.job.chains
# initialize the data
self.dataobs.initialize(application=application)
self.observations = self.dataobs.observations
# initialize the parametersets
# initialize the offset
psets = self.psets
# initialize the offset
offset = 0
for name in self.psets_list:
# get the parameter set from psets dictionary
pset = self.psets[name]
# initialize the parameter set
offset += pset.initialize(model=self, offset=offset)
# the total number of parameters is now known, so record it
self.parameters = offset
# all done
return self
@altar.export
def posterior(self, application):
"""
Sample my posterior distribution
"""
# ask my controller to help me sample my posterior distribution
return self.controller.posterior(model=self)
@altar.export
def initializeSample(self, step):
    """
    Fill {step.θ} with an initial random sample from my prior distribution.
    """
    # narrow the sample matrix down to the slice that belongs to me
    mine = self.restrict(theta=step.theta)
    # let each of my parameter sets draw its own portion
    for pset in self.psets.values():
        pset.initializeSample(theta=mine)
    # enable chaining
    return self
@altar.export
def verify(self, step, mask):
    """
    Check whether the samples in {step.theta} are consistent with the model requirements and
    update the {mask}, a vector with zeroes for valid samples and non-zero for invalid ones
    """
    # pull out my slice of the sample matrix
    mine = self.restrict(theta=step.theta)
    # let every parameter set flag the samples it rejects
    for pset in self.psets.values():
        pset.verify(theta=mine, mask=mask)
    # hand back the updated rejection map
    return mask
def evalPrior(self, theta, prior):
    """
    Fill {prior} with the log likelihoods of the samples in {theta} in my prior distribution
    """
    # each parameter set contributes the prior log-likelihood of its own parameters
    for pset in self.psets.values():
        pset.priorLikelihood(theta, prior)
    # enable chaining
    return self
def forwardModel(self, theta, prediction):
    """
    The forward model for a single set of parameters

    theta: one sample, i.e. a single set of parameter values
    prediction: destination for the model output; depending on {return_residual}
        subclasses fill it with either the raw prediction or the data residual
    raises NotImplementedError: always; subclasses must override this method
    """
    # i don't know what to do, so...
    raise NotImplementedError(
        f"model '{type(self).__name__}' must implement 'forwardModel'")
def forwardModelBatched(self, theta, prediction):
    """
    The forward model for a batch of theta: compute prediction from theta
    also return {residual}=True, False if the difference between data and prediction is computed
    """
    # the default implementation falls back to the one-sample forward model, row by row;
    # a scratch vector receives each single-sample result before it is deposited
    scratch = altar.vector(shape=self.observations)
    # walk over the rows of the sample matrix
    for row in range(self.samples):
        # pull out the parameter values of this sample
        parameters = theta.getRow(row)
        # evaluate the single-sample forward model
        self.forwardModel(theta=parameters, prediction=scratch)
        # and deposit the result into the matching row of {prediction}
        prediction.setRow(row, scratch)
    # enable chaining
    return self
def evalDataLikelihood(self, theta, likelihood):
    """
    calculate data likelihood and add it to step.prior or step.data
    """
    # NB: this default relies on {forwardModelBatched}; models that lack one
    # must override this method with their own implementation
    # a matrix to hold one prediction (or residual) per sample
    predicted = altar.matrix(shape=(self.samples, self.observations))
    # run the batched forward model
    self.forwardModelBatched(theta=theta, prediction=predicted)
    # let the data component fold the predictions into log-likelihoods,
    # telling it whether {predicted} already holds residuals
    self.dataobs.evalLikelihood(
        prediction=predicted, likelihood=likelihood, residual=self.return_residual)
    # enable chaining
    return self
def evalPosterior(self, step):
    """
    Given the {step.prior} and {step.data} likelihoods, compute a generalized posterior using
    {step.beta} and deposit the result in {step.post}
    """
    # the destination vector
    post = step.posterior
    # start from the prior log-likelihood
    post.copy(step.prior)
    # fold in the tempered data log-likelihood: post += β·data;
    # this reduces to Bayes' theorem as β -> 1
    altar.blas.daxpy(step.beta, step.data, post)
    # enable chaining
    return self
@altar.export
def likelihoods(self, annealer, step):
    """
    Convenience function that computes all three likelihoods at once given the current {step}
    of the problem
    """
    # grab the dispatcher so interested parties can monitor progress
    dispatcher = annealer.dispatcher

    # notify we are about to compute the prior likelihood
    dispatcher.notify(event=dispatcher.priorStart, controller=annealer)
    # compute the prior likelihood
    self.evalPrior(theta=step.theta, prior=step.prior)
    # done
    dispatcher.notify(event=dispatcher.priorFinish, controller=annealer)

    # notify we are about to compute the likelihood of the prior given the data
    dispatcher.notify(event=dispatcher.dataStart, controller=annealer)
    # grab the portion of the sample that's mine
    θ = self.restrict(theta=step.theta)
    # compute the data likelihood
    self.evalDataLikelihood(theta=θ, likelihood=step.data)
    # done
    dispatcher.notify(event=dispatcher.dataFinish, controller=annealer)

    # finally, notify we are about to put together the posterior at this temperature
    dispatcher.notify(event=dispatcher.posteriorStart, controller=annealer)
    # compute it
    self.evalPosterior(step=step)
    # done
    dispatcher.notify(event=dispatcher.posteriorFinish, controller=annealer)

    # enable chaining
    return self
def updateModel(self, annealer):
    """
    Give the model an opportunity to adjust its own state between annealing steps

    :param annealer: the controller driving the simulation
    :return: whether the model changed; the base implementation never does, so False
    """
    # nothing to update here; subclasses override when they have dynamic state
    return False
# implementation details
def mountInputDataspace(self, pfs):
    """
    Mount the directory with my input files
    """
    # attempt to mount the local directory that holds my input data
    try:
        ifs = altar.filesystem.local(root=self.case)
    # if the mount point is bad
    except altar.filesystem.MountPointError as error:
        # grab my error channel and complain
        channel = self.error
        channel.log(f"bad case name: '{self.case}'")
        channel.log(str(error))
        # and bail
        raise SystemExit(1)
    # otherwise, explore the filesystem and graft it onto the application namespace
    pfs["inputs"] = ifs.discover()
    # hand back the mounted filesystem
    return ifs
def loadFile(self, filename, shape=None, dataset=None, dtype=None):
    """
    Load an input file into a numpy array (for both float32/64 support)
    Supported formats:
    1. text file with a '.txt' suffix, stored in the prescribed shape
    2. binary file with a '.bin' or '.dat' suffix;
       the stored precision must match the desired {dtype},
       and callers must specify the shape of the data
    3. (preferred) hdf5 file with an '.h5' suffix;
       the shape/precision metadata is included in the .h5 file
    :param filename: str, the input file name
    :param shape: list of int, the shape to impose on the loaded data
    :param dataset: str, name/key of dataset, for h5 input only
    :param dtype: the desired data type; defaults to the model precision
    :return: a numpy array with the file contents
    :raises ValueError: for an unsupported suffix, or binary input without a shape
    """
    # decide the data type of the loaded data
    dtype = dtype or self.precision
    # grab the input filesystem and my error channel
    ifs = self.ifs
    channel = self.error
    # attempt to look the file up in the input filesystem
    try:
        file = ifs[filename]
    # if it isn't there
    # (original read 'except not ifs.NotFoundError', which matched against the
    # boolean False instead of the exception class)
    except ifs.NotFoundError:
        # complain, naming the missing file, and let the exception propagate
        channel.log(f"no file '{filename}' found in '{ifs.path()}'")
        raise
    # get the suffix to determine the file type
    suffix = file.uri.suffix
    # use .txt for non-binary input
    if suffix == '.txt':
        # load to a cpu array
        cpuData = numpy.loadtxt(file.uri.path, dtype=dtype)
    # binary data
    elif suffix == '.bin' or suffix == '.dat':
        # binary files carry no metadata, so the caller must supply the shape
        # (the original consulted an undefined name 'out' and raised the None
        # returned by channel.log)
        if shape is None:
            channel.log(f"must specify shape for binary input '{filename}'")
            raise ValueError(f"must specify shape for binary input '{filename}'")
        # read and reshape; callers are responsible for matching the precision
        cpuData = numpy.fromfile(file.uri.path, dtype=dtype).reshape(shape)
    # hdf5 file
    elif suffix == '.h5':
        # get support
        import h5py
        # open the file read-only; the context manager guarantees it is closed
        with h5py.File(file.uri.path, 'r') as h5file:
            # if not provided, assume the only or first dataset as default
            if dataset is None:
                dataset = list(h5file.keys())[0]
            # pull the dataset into a cpu array with the requested precision
            cpuData = numpy.asarray(h5file.get(dataset), dtype=dtype)
        # impose the requested shape, if any
        if shape is not None:
            cpuData = cpuData.reshape(shape)
    # anything else is unsupported; previously this fell through to a NameError
    else:
        channel.log(f"unsupported suffix '{suffix}' for input file '{filename}'")
        raise ValueError(f"unsupported suffix '{suffix}' for input file '{filename}'")
    # all done
    return cpuData
def restrict(self, theta):
    """
    Return my portion of the sample matrix {theta}
    """
    # my block spans all sample rows, so its height is the row count of {theta}
    rows = theta.rows
    # and its width is my parameter count
    columns = self.parameters
    # it starts at row 0, in the column where my parameters begin
    origin = 0, self.offset
    # hand back a zero-copy view into {theta} over my block
    return theta.view(start=origin, shape=(rows, columns))
# private data
observations = None  # the number of data observations; set in {initialize} from {dataobs}
device = None  # NOTE(review): never assigned in the visible code — presumably set by accelerated subclasses; confirm
precision = None  # numeric precision; used by {loadFile} as the default dtype
ifs = None # the filesystem with the input files
# end of file
|
lijun99/altar | cuda/cuda/distributions/cudaGaussian.py | <filename>cuda/cuda/distributions/cudaGaussian.py
# -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# get the package
import altar
import altar.cuda.ext.cudaaltar as libcudaaltar
# get the base
from .cudaDistribution import cudaDistribution
# the declaration
# the declaration
class cudaGaussian(cudaDistribution, family="altar.cuda.distributions.gaussian"):
    """
    The cuda gaussian probability distribution
    """

    # user configurable state
    mean = altar.properties.float(default=0.0)
    mean.doc = "the mean value"

    sigma = altar.properties.float(default=1.0)
    # fixed: the doc string had a stray leading space
    sigma.doc = "the standard deviation"

    def cuInitSample(self, theta):
        """
        Fill my portion of {theta} with initial random values from my distribution.
        """
        # the number of samples is the leading dimension of {theta}
        batch = theta.shape[0]
        # delegate the actual draw to the cuda extension, restricted to my columns
        libcudaaltar.cudaGaussian_sample(theta.data, batch, self.idx_range, (self.mean, self.sigma))
        # and return
        return self

    def cuVerify(self, theta, mask):
        """
        Check whether my portion of the samples in {theta} are consistent with my constraints, and
        update {mask}, a vector with zeroes for valid samples and non-zero for invalid ones

        Arguments:
            theta cuArray (samples x total_parameters)
        """
        # a gaussian imposes no support constraints, so all samples are valid
        # all done; return the rejection map unchanged
        return mask

    def cuEvalPrior(self, theta, prior, batch):
        """
        Fill my portion of {prior} with the log likelihoods of the samples in {theta}
        """
        # delegate to the cuda extension, restricted to my columns
        libcudaaltar.cudaGaussian_logpdf(theta.data, prior.data, batch, self.idx_range, (self.mean, self.sigma))
        # all done
        return self
# local variables
# end of file
|
lijun99/altar | cuda/cuda/distributions/cudaDistribution.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# externals
import math
# get the package
import altar
import altar.cuda
# get the base
from altar.distributions.Base import Base
# the declaration
# the declaration
class cudaDistribution(Base, family="altar.distributions.cudadistribution"):
    """
    The base class for cuda probability distributions
    """

    # user configurable state from its cpu superclass
    parameters = altar.properties.int()
    parameters.doc = "the number of model parameters that belong to me"

    offset = altar.properties.int(default=0)
    offset.doc = "the starting point of my parameters in the overall model state"

    # configuration
    @altar.export
    def initialize(self, rng):
        """
        Initialize with the given random number generator
        """
        # will recommend a framework change to use application instead of rng
        # some distribution might need info from application
        # e.g, cascaded need worker id
        # so, use cuInitialize instead
        return self

    @altar.export
    def verify(self, theta, mask):
        """
        Satisfy the cpu protocol obligation; cuda models use {cuVerify} instead
        """
        return self

    # cuda methods
    def cuInitialize(self, application):
        """
        cuda specific initialization
        """
        # record the (start, end) column range of my parameters in the sample matrix
        self.idx_range = (self.offset, self.offset + self.parameters)
        # grab the device assigned to my worker
        self.device = application.controller.worker.device
        # and the precision requested for gpu computations
        self.precision = application.job.gpuprecision
        # all done
        return self

    def cuInitSample(self, theta):
        """
        cuda process to initialize random samples; subclasses override
        """
        return self

    def cuVerify(self, theta, mask):
        """
        cuda process to verify the validity of samples; subclasses override
        """
        return mask

    # {batch} added with a default so subclass overrides that take an explicit
    # batch count (e.g. cudaGaussian.cuEvalPrior) share a compatible signature
    def cuEvalPrior(self, theta, prior, batch=None):
        """
        cuda process to compute the prior
        """
        return prior

    # private data
    device = None
    idx_range = None
    precision = None
# end of file
|
lijun99/altar | altar/altar/distributions/__init__.py | # -*- python -*-
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# the package
import altar
# publish the protocol for probability distributions
from .Distribution import Distribution as distribution
# implementations
@altar.foundry(implements=distribution, tip="the uniform probability distribution")
def uniform():
    # pull in the component class
    from .Uniform import Uniform as factory
    # attach its docstring
    __doc__ = factory.__doc__
    # and publish it
    return factory
@altar.foundry(implements=distribution, tip="the gaussian probability distribution")
def gaussian():
    # pull in the component class
    from .Gaussian import Gaussian as factory
    # attach its docstring
    __doc__ = factory.__doc__
    # and publish it
    return factory
@altar.foundry(implements=distribution, tip="the unit gaussian probability distribution")
def ugaussian():
    # pull in the component class
    from .UnitGaussian import UnitGaussian as factory
    # attach its docstring
    __doc__ = factory.__doc__
    # and publish it
    return factory
# end of file
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.