Compare commits

..

2 Commits

Author SHA1 Message Date
Matthieu Baumann
ac4af8fb18 fix Circle::intersectBbox 2025-08-29 11:54:31 +02:00
Erik Mellegard
279f93c4ba Fix Circle intersectBBox 2025-06-13 10:14:15 +02:00
147 changed files with 4700 additions and 7020 deletions

View File

@@ -15,13 +15,7 @@ A new [API technical documentation](https://cds-astro.github.io/aladin-lite/) is
[![API Documentation](https://img.shields.io/badge/API-documentation-blue.svg)](https://cds-astro.github.io/aladin-lite)
[![Release page](https://img.shields.io/badge/Release-download-yellow.svg)](https://aladin.cds.unistra.fr/AladinLite/doc/release/)
Try Aladin Lite [here](https://aladin.u-strasbg.fr/AladinLite).
Aladin Lite is made possible thanks to pure Rust core libraries:
* [cdshealpix](https://github.com/cds-astro/cds-healpix-rust) - for HEALPix projection and unprojection to/from sky coordinates
* [mapproj](https://github.com/cds-astro/cds-mapproj-rust) - for computing (un)projections described by a WCS
* [fitsrs](https://github.com/cds-astro/fitsrs) - for reading and parsing FITS images
* [moc](https://github.com/cds-astro/cds-moc-rust) - for parsing, manipulating, and serializing multi-order HEALPix coverage maps
Aladin Lite is available [at this link](https://aladin.u-strasbg.fr/AladinLite).
## Running & editable JS examples
@@ -108,15 +102,14 @@ Aladin Lite can be imported with:
* [X] FITS images support
* [X] WCS parsing, displaying an (JPEG/PNG) image in aladin lite view
* [X] Display customized shapes (e.g. proper motions) from astronomical catalog data
* [X] AVM tags parsing support inside JPEG
* [X] AVM tags parsing support
* [X] Easy sharing of current « view »
* [ ] All VOTable serializations
* [ ] FITS tables
* [X] Creating HiPS instance from an URL
* [X] Local HiPS loading
* [X] Multiple mirrors handling for HiPS tile retrieval
* [X] HiPS cube
* [ ] HiPS3D
* [ ] HiPS cube
## Licence

View File

@@ -26,17 +26,8 @@
limit: 1000,
//orderBy: 'nb_ref',
onClick: 'showTable',
onlyFootprints: false,
color: (s) => {
let coo = A.coo();
coo.parse(s.data['RAJ2000'] + ' ' + s.data['DEJ2000'])
let a = (0.1 * Math.pow(10, +s.data.logD25)) / 60;
let b = (1.0 / Math.pow(10, +s.data.logR25)) * a
return `rgb(${s.data["logR25"]*255.0}, ${s.data["logR25"]*255.0}, 255)`
},
hoverColor: 'red',
color: 'yellow',
hoverColor: 'blue',
shape: (s) => {
let coo = A.coo();
coo.parse(s.data['RAJ2000'] + ' ' + s.data['DEJ2000'])

View File

@@ -31,21 +31,15 @@
hoverColor: 'yellow',
selectionColor: 'white',
// Footprint associated to sources
color: (s) => {
// discard drawing a vector for big pm
let totalPmSquared = s.data.pmra*s.data.pmra + s.data.pmdec*s.data.pmdec;
if (totalPmSquared > 6) {
return;
}
return rainbowColorMap((totalPmSquared - 2.5) / 2)
},
shape: (s) => {
// discard drawing a vector for big pm
let totalPmSquared = s.data.pmra*s.data.pmra + s.data.pmdec*s.data.pmdec;
if (totalPmSquared > 6) {
return;
}
let color = rainbowColorMap((totalPmSquared - 2.5) / 2)
// Compute the mean of pm over the catalog sources
if (!pmraMean || !pmdecMean) {
pmraMean = 0, pmdecMean = 0;
@@ -68,24 +62,13 @@
s.dec,
s.ra + dra,
s.dec + ddec,
{color}
)
}
},
() => {
aladin.addCatalog(pmCat);
pmCat.select((s) => {
let totalPmSquared = s.data.pmra*s.data.pmra + s.data.pmdec*s.data.pmdec;
if (totalPmSquared > 6) {
return false;
}
return totalPmSquared < 3.0;
});
});
aladin.addCatalog(pmCat);
});
function rainbowColorMap(value) {
// Ensure value is within range [0, 1]
value = Math.max(0, Math.min(1, value));

View File

@@ -1,22 +0,0 @@
<!doctype html>
<html>
<head>
<meta name="viewport" content="width=device-width, height=device-height, maximum-scale=1.0, initial-scale=1.0, user-scalable=no">
</head>
<body>
<div id="aladin-lite-div" style="width: 500px; height: 500px"></div>
<script type="text/javascript" src="./../dist/aladin.umd.cjs" charset="utf-8"></script>
<script type="text/javascript">
var aladin;
A.init.then(() => {
aladin = A.aladin('#aladin-lite-div', {fullScreen: true, cooFrame: "ICRSd", showSimbadPointerControl: true, showShareControl: true, showShareControl: true, survey: 'https://alasky.cds.unistra.fr/DSS/DSSColor/', fov: 1.0, target: 'M 20', showContextMenu: true});
// customize share URL function
aladin.customizeShareURLFunction(() => {return 'https://sky.esa.int/esasky/?target=' + aladin.getRaDec()[0] + '%20' + aladin.getRaDec()[1] + '&fov=' + aladin.getFoV()[0]})
});
</script>
</body>
</html>

View File

@@ -11,12 +11,11 @@
import A from '../src/js/A.js';
let aladin;
A.init.then(() => {
aladin = A.aladin('#aladin-lite-div', {cooFrame: "icrs", log: false, backgroundColor: 'rgba(0, 0, 0, 255)'});
aladin = A.aladin('#aladin-lite-div', {cooFrame: "icrs", log: false, backgroundColor: 'red'});
aladin.displayFITS(
//'https://fits.gsfc.nasa.gov/samples/FOCx38i0101t_c0f.fits', // url of the fits file
'data/fits/panstarrs-g-m61.fits',
//'https://almascience.eso.org/dataPortal/member.uid___A001_X88f_X297.calibrated_final_cont_Sgr_B1off.pbcor.fits',
{
name: 'm61',
colormap: 'viridis'

View File

@@ -39,7 +39,7 @@ Image Opacity: <br/> <input id="slider" type="range" value=1 min=0 max=1 step=0.
//let fits = aladin.displayFITS('http://goldmine.mib.infn.it/data//B/fits/A04_VC1316_ooooog.fits', 'overlay');
let jpg = aladin.displayJPG(
// the JPG to transform to HiPS
'https://owncloud.tuebingen.mpg.de/index.php/s/sdxfNgcEaaXoBp7/download/nightskycam3_2025_08_07_05_17_30_healpix1024_red.fits',
'https://noirlab.edu/public/media/archives/images/large/noirlab1912a.jpg',
// no options
{
transparency: 1.0,

View File

@@ -1,62 +0,0 @@
<!doctype html>
<html>
<head>
</head>
<body>
<div id="aladin-lite-div" style="width: 768px; height: 512px"></div>
<script>let aladin; let hips;</script>
<script type="module">
import A from '../src/js/A.js';
A.init.then(() => {
aladin = A.aladin(
'#aladin-lite-div',
{
showSimbadPointerControl: true,
projection: 'AIT', // set a projection
fov: 8.0, // initial field of view in degrees
target: '00 42 21.37 +41 07 29.8', // initial target
cooFrame: 'icrs', // set galactic frame
reticleColor: '#ff89ff', // change reticle color
showContextMenu: true,
showFrame: true,
showZoomControl:true,
showSettingsControl:true,
fullScreen: true,
samp: true,
}
);
hips = aladin.newImageSurvey("http://alasky.cds.unistra.fr/HIPS3D/GalfaHI", {
successCallback: (hips) => {
//hips.setFrequency({value: 6.374279333565797E-7, unit: "m"}) // GALFA
}
});
// compressed https://alasky.cds.unistra.fr/test-compression-cubes/DHIGLS/
//hips = aladin.newImageSurvey("http://alasky.cds.unistra.fr/DHIGLS");
//hips = aladin.newImageSurvey("https://alasky.cds.unistra.fr/MUSE3D");
// http://alasky.cds.unistra.fr/LGLBSHI
aladin.setImageLayer(hips)
//hips.setFrequency({value: emMin + delta * i, unit: "m"})
//hips.setFrequency({value: 6.374279333565797E-7, unit: "m"}) // MUSE
//hips.setFrequency({value: 0.21101690259115785, unit: "m"}) // DGHILG
/*let id;
aladin.on("zoomChanged", () => {
if (id)
clearTimeout(id);
id = setTimeout(() => {
console.log("wheel stopped, new cone search here")
}, 500);
})*/
});
</script>
<style>
.aladin-cat-browser-box {
width: 600px;
}
</style>
</body>
</html>

View File

@@ -30,14 +30,9 @@
}
);
hips = aladin.newImageSurvey("https://alasky.cds.unistra.fr/GALFAHI/GALFAHI-Narrow-DR2");
hips = aladin.newImageSurvey("https://alasky.cds.unistra.fr/GALFAHI/GALFAHI-Narrow-DR2/");
aladin.setImageLayer(hips)
setTimeout(() => {
hips.setSliceNumber(100)
}, 1000)
/*let id;
aladin.on("zoomChanged", () => {
if (id)

View File

@@ -8,7 +8,7 @@
import A from '../src/js/A.js';
A.init.then(() => {
let aladin = A.aladin('#aladin-lite-div', {fov: 30, target: "280 +0", projection: "AIT", showShareControl:true, showSettingsControl: true, showContextMenu:true});
aladin.setOverlayImageLayer(A.image(
"https://www.virtualastronomy.org/files/avm_examples/spitzer/ssc2005-24a1.jpg",
{

View File

@@ -14,7 +14,7 @@
{
name: "M61",
wcs: {
NAXIS: 2, // Minimal header
NAXIS: 0, // Minimal header
CTYPE1: 'RA---TAN', // TAN (gnomic) projection
CTYPE2: 'DEC--TAN', // TAN (gnomic) projection
EQUINOX: 2000.0, // Equatorial coordinates definition (yr)

View File

@@ -18,15 +18,16 @@ futures = "0.3.12"
js-sys = "0.3.47"
wasm-bindgen-futures = "0.4.20"
cgmath = "*"
# url-lite = "0.1.0"
url-lite = "0.1.0"
serde_json = "1.0.104"
serde-wasm-bindgen = "0.5"
enum_dispatch = "0.3.8"
wasm-bindgen = "=0.2.92"
#wasm-streams = "0.3.0"
wasm-streams = "0.3.0"
async-channel = "1.8.0"
mapproj = "0.3.0"
fitsrs = "0.3.4"
fitsrs = "0.2.11"
wcs = "0.3.1"
colorgrad = "0.6.2"
[features]
@@ -50,8 +51,7 @@ version = "0.7.3"
[dependencies.moclib]
package = "moc"
git = "https://github.com/cds-astro/cds-moc-rust"
branch = "main"
version = "0.17.0"
[dependencies.serde]
version = "^1.0.183"
@@ -65,7 +65,7 @@ path = "./al-api"
[dependencies.web-sys]
version = "0.3.56"
features = [ "console", "CssStyleDeclaration", "Document", "Element", "HtmlCollection", "CustomEvent", "CustomEventInit", "HtmlElement", "HtmlImageElement", "HtmlCanvasElement", "Blob", "ImageBitmap", "ImageData", "CanvasRenderingContext2d", "WebGlBuffer", "WebGlContextAttributes", "WebGlFramebuffer", "WebGlProgram", "WebGlShader", "WebGlUniformLocation", "WebGlTexture", "WebGlActiveInfo", "Headers", "Window", "Request", "RequestInit", "RequestMode", "RequestCredentials", "Response", "XmlHttpRequest", "XmlHttpRequestResponseType", "PerformanceTiming", "Performance", "Url", "ReadableStream", "File", "FileList",]
features = [ "console", "CssStyleDeclaration", "Document", "Element", "HtmlCollection", "HtmlElement", "HtmlImageElement", "HtmlCanvasElement", "Blob", "ImageBitmap", "ImageData", "CanvasRenderingContext2d", "WebGlBuffer", "WebGlContextAttributes", "WebGlFramebuffer", "WebGlProgram", "WebGlShader", "WebGlUniformLocation", "WebGlTexture", "WebGlActiveInfo", "Headers", "Window", "Request", "RequestInit", "RequestMode", "RequestCredentials", "Response", "XmlHttpRequest", "XmlHttpRequestResponseType", "PerformanceTiming", "Performance", "Url", "ReadableStream", "File", "FileList",]
[dev-dependencies.image-decoder]
package = "image"

View File

@@ -92,7 +92,7 @@ impl fmt::Display for BlendFactor {
BlendFactor::OneMinusSrcAlpha => "OneMinusSrcAlpha",
BlendFactor::OneMinusConstantColor => "OneMinusConstantColor",
};
write!(f, "{str}")
write!(f, "{}", str)
}
}
impl fmt::Display for BlendFunc {
@@ -111,6 +111,6 @@ impl fmt::Display for BlendFunc {
#[cfg(feature = "webgl2")]
BlendFunc::Max => "Max",*/
};
write!(f, "{str}")
write!(f, "{}", str)
}
}

View File

@@ -48,26 +48,14 @@ pub struct HiPSProperties {
hips_initial_fov: Option<f64>,
hips_initial_ra: Option<f64>,
hips_initial_dec: Option<f64>,
// HiPS cube
hips_cube_depth: Option<u32>,
// HiPS 3D keywords
hips_order_freq: Option<u8>,
hips_tile_depth: Option<u8>,
/// Start of spectral coordinates (in meters)
em_min: Option<f32>,
/// End of spectral coordinates (in meters)
em_max: Option<f32>,
// Parametrable by the user
#[allow(unused)]
min_cutout: Option<f32>,
#[allow(unused)]
max_cutout: Option<f32>,
dataproduct_type: Option<DataproductType>,
creator_did: String,
request_credentials: String,
@@ -75,20 +63,6 @@ pub struct HiPSProperties {
}
impl HiPSProperties {
#[inline(always)]
pub fn get_hips_order_freq(&self) -> Option<u8> {
self.hips_order_freq
}
#[inline(always)]
pub fn get_hips_tile_depth(&self) -> Option<u8> {
self.hips_tile_depth
}
#[inline(always)]
pub fn get_dataproduct_type(&self) -> Option<DataproductType> {
self.dataproduct_type
}
#[inline(always)]
pub fn get_url(&self) -> &str {
&self.url
@@ -163,39 +137,18 @@ impl HiPSProperties {
pub fn get_request_mode(&self) -> &str {
&self.request_mode
}
#[inline(always)]
pub fn get_em_min(&self) -> Option<f32> {
self.em_min
}
#[inline(always)]
pub fn get_em_max(&self) -> Option<f32> {
self.em_max
}
}
#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[wasm_bindgen]
#[serde(rename_all = "camelCase")]
pub enum ImageExt {
#[serde(alias = "fits", alias = "fits.fz")]
Fits,
Jpeg,
Png,
Webp,
}
#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[wasm_bindgen]
#[serde(rename_all = "camelCase")]
pub enum DataproductType {
#[serde(rename = "spectral-cube")]
SpectralCube,
Image,
Cube,
}
impl std::fmt::Display for ImageExt {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {

View File

@@ -7,14 +7,14 @@ edition = "2018"
[dependencies]
js-sys = "0.3.47"
cgmath = "*"
#jpeg-decoder = "0.3.0"
#png = "0.17.6"
fitsrs = "0.3.4"
jpeg-decoder = "0.3.0"
png = "0.17.6"
fitsrs = "0.2.10"
al-api = { path = "../al-api" }
serde = { version = "^1.0.59", features = ["derive"] }
serde_json = "1.0"
serde-wasm-bindgen = "0.4"
# wasm-streams = "0.3.0"
wasm-streams = "0.3.0"
futures = "0.3.25"
colorgrad = "0.6.2"
wasm-bindgen = "0.2.92"

View File

@@ -2,11 +2,11 @@ use std::collections::HashMap;
use colorgrad::Color;
use crate::image::format;
use crate::shader::SendUniformsWithParams;
use crate::Texture2D;
use crate::WebGlContext;
use crate::texture::format::RGBA8U;
use crate::webgl_ctx::WebGlRenderingCtx;
use wasm_bindgen::JsValue;
@@ -68,7 +68,7 @@ fn build_cmaps_texture(gl: &WebGlContext, cmaps: &[Colormap]) -> Result<Texture2
),
];
Texture2D::create_from_raw_pixels::<RGBA8U>(
Texture2D::create_from_raw_pixels::<format::RGBA8U>(
gl,
WIDTH_CMAP_TEX as i32,
cmaps.len() as i32,
@@ -134,14 +134,14 @@ impl Colormaps {
Colormap::new("grayscale", {
colorgrad::CustomGradient::new()
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
Colormap::new("inferno", colorgrad::inferno()),
Colormap::new("magma", colorgrad::magma()),
Colormap::new("native", {
colorgrad::CustomGradient::new()
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
Colormap::new("parula", {
colorgrad::CustomGradient::new()
@@ -155,7 +155,7 @@ impl Colormaps {
Color::from_rgba8(249, 250, 20, 255),
])
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
Colormap::new("plasma", colorgrad::plasma()),
Colormap::new("rainbow", {
@@ -173,7 +173,7 @@ impl Colormaps {
Color::from_rgba8(255, 0, 0, 255),
])
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
Colormap::new("rdbu", colorgrad::rd_bu()),
Colormap::new("rdylbu", colorgrad::rd_yl_bu()),
@@ -186,7 +186,7 @@ impl Colormaps {
Color::new(1.0, 1.0, 1.0, 1.0),
])
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
Colormap::new("sinebow", colorgrad::sinebow()),
Colormap::new("spectral", colorgrad::spectral()),
@@ -201,7 +201,7 @@ impl Colormaps {
Color::new(1.0, 0.0, 0.0, 1.0),
])
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
Colormap::new("green", {
colorgrad::CustomGradient::new()
@@ -210,7 +210,7 @@ impl Colormaps {
Color::new(0.0, 1.0, 0.0, 1.0),
])
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
Colormap::new("blue", {
colorgrad::CustomGradient::new()
@@ -219,7 +219,7 @@ impl Colormaps {
Color::new(0.0, 0.0, 1.0, 1.0),
])
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?
}),
];
@@ -246,7 +246,8 @@ impl Colormaps {
&self.cmaps[id as usize]
} else {
crate::log::console_warn(format!(
"{label:?} is not a valid colormap, replaced with 'grayscale'.",
"{:?} is not a valid colormap, replaced with 'grayscale'.",
label
));
let id_greys = self.get_id("grayscale").unwrap_abort();
&self.cmaps[*id_greys as usize]

View File

@@ -6,11 +6,11 @@ pub struct Bitmap<F> {
format: std::marker::PhantomData<F>,
}
use crate::image::format::ImageFormat;
use crate::image::Image;
use crate::texture::format::TextureFormat;
impl<F> Bitmap<F>
where
F: TextureFormat + Clone,
F: ImageFormat + Clone,
{
pub fn new(image: web_sys::ImageBitmap) -> Self {
Self {
@@ -23,7 +23,7 @@ use crate::texture::Tex3D;
use wasm_bindgen::JsValue;
impl<F> Image for Bitmap<F>
where
F: TextureFormat + Clone,
F: ImageFormat + Clone,
{
fn insert_into_3d_texture<T: Tex3D>(
&self,
@@ -35,7 +35,7 @@ where
Ok(())
}
fn get_size(&self) -> (u32, u32, u32) {
(self.image.width(), self.image.height(), 1)
fn get_size(&self) -> (u32, u32) {
(self.image.width(), self.image.height())
}
}

View File

@@ -7,7 +7,7 @@ pub struct Canvas<F> {
impl<F> Canvas<F>
where
F: TextureFormat + Clone,
F: ImageFormat + Clone,
{
pub fn new(canvas: web_sys::HtmlCanvasElement) -> Self {
Self {
@@ -17,14 +17,14 @@ where
}
}
use crate::image::format::ImageFormat;
use crate::image::Image;
use crate::texture::format::TextureFormat;
use crate::texture::Tex3D;
use cgmath::Vector3;
use wasm_bindgen::JsValue;
impl<F> Image for Canvas<F>
where
F: TextureFormat,
F: ImageFormat,
{
fn insert_into_3d_texture<T: Tex3D>(
&self,
@@ -43,7 +43,7 @@ where
Ok(())
}
fn get_size(&self) -> (u32, u32, u32) {
(self.canvas.width(), self.canvas.height(), 1)
fn get_size(&self) -> (u32, u32) {
(self.canvas.width(), self.canvas.height())
}
}

View File

@@ -1,121 +1,68 @@
use crate::texture::format::TextureFormat;
use crate::texture::format::R8U;
use cgmath::Vector3;
use fitsrs::card::Value;
use fitsrs::hdu::header::Bitpix;
use fitsrs::hdu::header::Header;
use fitsrs::hdu::header::Xtension;
use fitsrs::WCS;
use fitsrs::{Fits, HDU};
use std::fmt::Debug;
use std::io::Cursor;
use std::ops::Range;
use wasm_bindgen::JsValue;
use cgmath::{Vector2, Vector3};
#[derive(Debug)]
pub struct FitsImage<'a> {
// Margin values for HiPS3D cubic tiles
pub trim1: u32,
pub trim2: u32,
pub trim3: u32,
// Image/cube size
pub width: u32,
pub height: u32,
pub depth: u32,
// Bitpix
pub bitpix: Bitpix,
// 1.0 by default
pub bscale: f32,
// 0.0 by default
pub bzero: f32,
// blank
pub blank: Option<f32>,
// optional wcs
pub wcs: Option<WCS>,
// bytes offset where the data bytes are located inside the fits
pub data_byte_offset: Range<usize>,
// raw bytes of the data image (in Big-Endian)
pub raw_bytes: &'a [u8],
pub struct Fits<'a> {
// Tile size
size: Vector2<i32>,
pub data: Data<'a>,
}
fn parse_keyword_as_number<X: Xtension + Debug>(header: &Header<X>, keyword: &str) -> Option<f32> {
match header.get(keyword) {
Some(Value::Integer { value, .. }) => Some(*value as f32),
Some(Value::Float { value, .. }) => Some(*value as f32),
_ => None,
}
use std::borrow::Cow;
use std::fmt::Debug;
#[derive(Debug)]
pub enum Data<'a> {
U8(Cow<'a, [u8]>),
I16(Cow<'a, [i16]>),
I32(Cow<'a, [i32]>),
F32(Cow<'a, [f32]>),
}
use fitsrs::{fits::Fits as FitsData, hdu::data::InMemData};
use std::io::Cursor;
impl<'a> FitsImage<'a> {
/// Get all the hdu images from a fits file
pub fn from_raw_bytes(bytes: &'a [u8]) -> Result<Vec<Self>, JsValue> {
let mut fits = Fits::from_reader(Cursor::new(bytes));
let mut images = vec![];
impl<'a> Fits<'a> {
pub fn from_byte_slice(bytes_reader: &'a mut Cursor<&[u8]>) -> Result<Self, JsValue> {
let FitsData { hdu } = FitsData::from_reader(bytes_reader)
.map_err(|_| JsValue::from_str("Parsing fits error"))?;
while let Some(Ok(hdu)) = fits.next() {
match hdu {
HDU::XImage(hdu) | HDU::Primary(hdu) => {
// Prefer getting the dimension directly from NAXIS1/NAXIS2 instead of from the WCS
// because it may not exist in all HDU images
let width = hdu.get_header().get_xtension().get_naxisn(1);
let height = hdu.get_header().get_xtension().get_naxisn(2);
let header = hdu.get_header();
let xtension = header.get_xtension();
let width = xtension
.get_naxisn(1)
.ok_or_else(|| JsValue::from_str("NAXIS1 not found in the fits"))?;
if let (Some(&width), Some(&height)) = (width, height) {
let depth =
*hdu.get_header().get_xtension().get_naxisn(3).unwrap_or(&1) as u32;
let height = xtension
.get_naxisn(2)
.ok_or_else(|| JsValue::from_str("NAXIS2 not found in the fits"))?;
let header = hdu.get_header();
let bscale = parse_keyword_as_number(header, "BSCALE").unwrap_or(1.0);
let bzero = parse_keyword_as_number(header, "BZERO").unwrap_or(0.0);
let blank = parse_keyword_as_number(header, "BLANK");
let trim1 = parse_keyword_as_number(header, "TRIM1").unwrap_or(0.0) as u32;
let trim2 = parse_keyword_as_number(header, "TRIM2").unwrap_or(0.0) as u32;
let trim3 = parse_keyword_as_number(header, "TRIM3").unwrap_or(0.0) as u32;
let bitpix = hdu.get_header().get_xtension().get_bitpix();
let off = hdu.get_data_unit_byte_offset() as usize;
let len = hdu.get_data_unit_byte_size() as usize;
let data_byte_offset = off..(off + len);
let raw_bytes = &bytes[data_byte_offset.clone()];
let wcs = hdu.wcs().ok();
images.push(Self {
trim1,
trim2,
trim3,
width: width as u32,
height: height as u32,
depth,
bitpix,
bscale,
wcs,
bzero,
blank,
data_byte_offset,
raw_bytes,
});
}
}
_ => (),
let data = hdu.get_data();
let data = match *data {
InMemData::U8(slice) => Data::U8(Cow::Borrowed(slice)),
InMemData::I16(slice) => Data::I16(Cow::Borrowed(slice)),
InMemData::I32(slice) => Data::I32(Cow::Borrowed(slice)),
InMemData::I64(slice) => {
let data = slice.iter().map(|v| *v as i32).collect();
Data::I32(Cow::Owned(data))
}
}
InMemData::F32(slice) => Data::F32(Cow::Borrowed(slice)),
InMemData::F64(slice) => {
let data = slice.iter().map(|v| *v as f32).collect();
Data::F32(Cow::Owned(data))
}
};
if !images.is_empty() {
Ok(images)
} else {
Err(JsValue::from_str("Image HDU not found in the FITS"))
}
Ok(Self {
// Tile size
size: Vector2::new(*width as i32, *height as i32),
// Allocation info of the layout
data,
})
}
}
use crate::{image::Image, texture::Tex3D};
use std::convert::TryInto;
impl Image for FitsImage<'_> {
impl Image for Fits<'_> {
fn insert_into_3d_texture<T: Tex3D>(
&self,
// The texture array
@@ -123,56 +70,98 @@ impl Image for FitsImage<'_> {
// An offset to write the image in the texture array
offset: &Vector3<i32>,
) -> Result<(), JsValue> {
let view = unsafe {
match self.bitpix {
Bitpix::I64 => {
// convert to i64 first
let new_bytes: Vec<_> = self
.raw_bytes
.chunks_exact(8)
.flat_map(|chunk| {
let bytes: [u8; 8] = chunk.try_into().unwrap();
let value = i64::from_be_bytes(bytes);
(value as i32).to_be_bytes()
})
.collect();
R8U::view(&new_bytes)
}
Bitpix::F64 => {
let new_bytes: Vec<_> = self
.raw_bytes
.chunks_exact(8)
.flat_map(|chunk| {
let bytes: [u8; 8] = chunk.try_into().unwrap();
let value = f64::from_be_bytes(bytes);
(value as f32).to_be_bytes()
})
.collect();
R8U::view(&new_bytes)
}
_ => R8U::view(self.raw_bytes),
match &self.data {
Data::U8(data) => {
let view = unsafe { R8UI::view(data) };
textures.tex_sub_image_3d_with_opt_array_buffer_view(
offset.x,
offset.y,
offset.z,
self.size.x,
self.size.y,
1,
Some(view.as_ref()),
);
}
};
textures.tex_sub_image_3d_with_opt_array_buffer_view(
offset.x + self.trim1 as i32,
offset.y + self.trim2 as i32,
offset.z + self.trim3 as i32,
self.width as i32,
self.height as i32,
self.depth as i32,
Some(view.as_ref()),
);
Data::I16(data) => {
let view = unsafe { R16I::view(data) };
textures.tex_sub_image_3d_with_opt_array_buffer_view(
offset.x,
offset.y,
offset.z,
self.size.x,
self.size.y,
1,
Some(view.as_ref()),
);
}
Data::I32(data) => {
let view = unsafe { R32I::view(data) };
textures.tex_sub_image_3d_with_opt_array_buffer_view(
offset.x,
offset.y,
offset.z,
self.size.x,
self.size.y,
1,
Some(view.as_ref()),
);
}
Data::F32(data) => {
let view = unsafe {
R8UI::view(std::slice::from_raw_parts(
data.as_ptr() as *const u8,
data.len() * 4,
))
};
textures.tex_sub_image_3d_with_opt_array_buffer_view(
offset.x,
offset.y,
offset.z,
self.size.x,
self.size.y,
1,
Some(view.as_ref()),
);
}
}
Ok(())
}
fn get_size(&self) -> (u32, u32, u32) {
// The true image size is given by ONAXISi keywords
(self.width, self.height, self.depth)
fn get_size(&self) -> (u32, u32) {
(self.size.x as u32, self.size.y as u32)
}
}
use crate::image::format::ImageFormat;
use wasm_bindgen::JsValue;
pub trait FitsImageFormat: ImageFormat {
const BITPIX: i8;
}
use crate::image::R32F;
impl FitsImageFormat for R32F {
const BITPIX: i8 = -32;
}
#[cfg(feature = "webgl2")]
use crate::image::{R16I, R32I, R64F, R8UI};
#[cfg(feature = "webgl2")]
impl FitsImageFormat for R64F {
const BITPIX: i8 = -64;
}
#[cfg(feature = "webgl2")]
impl FitsImageFormat for R32I {
const BITPIX: i8 = 32;
}
#[cfg(feature = "webgl2")]
impl FitsImageFormat for R16I {
const BITPIX: i8 = 16;
}
#[cfg(feature = "webgl2")]
impl FitsImageFormat for R8UI {
const BITPIX: i8 = 8;
}

View File

@@ -1,9 +1,311 @@
use crate::texture::format::PixelType;
use crate::texture::pixel::Pixel;
use al_api::hips::ImageExt;
pub enum Bytes<'a> {
Borrowed(&'a [u8]),
Owned(Vec<u8>),
}
pub trait ImageFormat {
type P: Pixel;
type ArrayBufferView: AsRef<js_sys::Object>;
const NUM_CHANNELS: usize;
const FORMAT: u32;
const INTERNAL_FORMAT: i32;
const TYPE: u32;
const CHANNEL_TYPE: ChannelType;
/// Creates a JS typed array which is a view into wasm's linear memory at the slice specified.
/// This function returns a new typed array which is a view into wasm's memory. This view does not copy the underlying data.
///
/// # Safety
///
/// Views into WebAssembly memory are only valid so long as the backing buffer isn't resized in JS. Once this function is called any future calls to Box::new (or malloc of any form) may cause the returned value here to be invalidated. Use with caution!
///
/// Additionally the returned object can be safely mutated but the input slice isn't guaranteed to be mutable.
///
/// Finally, the returned object is disconnected from the input slice's lifetime, so there's no guarantee that the data is read at the right time.
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str>;
}
use crate::webgl_ctx::WebGlRenderingCtx;
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct RGB8U;
impl ImageFormat for RGB8U {
type P = [u8; 3];
const NUM_CHANNELS: usize = 3;
const FORMAT: u32 = WebGlRenderingCtx::RGB;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGB8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const CHANNEL_TYPE: ChannelType = ChannelType::RGB8U;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
let mut decoder = jpeg::Decoder::new(raw_bytes);
let bytes = decoder
.decode()
.map_err(|_| "Cannot decoder jpeg. This image may not be compressed.")?;
Ok(Bytes::Owned(bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct RGBA8U;
#[cfg(feature = "webgl2")]
impl ImageFormat for RGBA8U {
type P = [u8; 4];
const NUM_CHANNELS: usize = 4;
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const CHANNEL_TYPE: ChannelType = ChannelType::RGBA8U;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
let mut decoder = jpeg::Decoder::new(raw_bytes);
let bytes = decoder
.decode()
.map_err(|_| "Cannot decoder png. This image may not be compressed.")?;
Ok(Bytes::Owned(bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct RGBA32F;
impl ImageFormat for RGBA32F {
type P = [f32; 4];
const NUM_CHANNELS: usize = 4;
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
#[cfg(feature = "webgl2")]
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA32F as i32;
#[cfg(feature = "webgl1")]
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA as i32;
const CHANNEL_TYPE: ChannelType = ChannelType::RGBA32F;
const TYPE: u32 = WebGlRenderingCtx::FLOAT;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Float32Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct RGB32F;
impl ImageFormat for RGB32F {
type P = [f32; 3];
const NUM_CHANNELS: usize = 3;
const FORMAT: u32 = WebGlRenderingCtx::RGB;
#[cfg(feature = "webgl2")]
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGB32F as i32;
#[cfg(feature = "webgl1")]
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGB as i32;
const CHANNEL_TYPE: ChannelType = ChannelType::RGB32F;
const TYPE: u32 = WebGlRenderingCtx::FLOAT;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Float32Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R32F;
impl ImageFormat for R32F {
type P = [u8; 4];
const NUM_CHANNELS: usize = 4;
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const CHANNEL_TYPE: ChannelType = ChannelType::R32F;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R64F;
impl ImageFormat for R64F {
type P = [u8; 4];
const NUM_CHANNELS: usize = 4;
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const CHANNEL_TYPE: ChannelType = ChannelType::R32F;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[cfg(feature = "webgl2")]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R8UI;
#[cfg(feature = "webgl2")]
impl ImageFormat for R8UI {
type P = [u8; 1];
const NUM_CHANNELS: usize = 1;
const FORMAT: u32 = WebGlRenderingCtx::RED_INTEGER;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::R8UI as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const CHANNEL_TYPE: ChannelType = ChannelType::R8UI;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[cfg(feature = "webgl2")]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R16I;
#[cfg(feature = "webgl2")]
impl ImageFormat for R16I {
type P = [i16; 1];
const NUM_CHANNELS: usize = 1;
const FORMAT: u32 = WebGlRenderingCtx::RED_INTEGER;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::R16I as i32;
const TYPE: u32 = WebGlRenderingCtx::SHORT;
const CHANNEL_TYPE: ChannelType = ChannelType::R16I;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Int16Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[cfg(feature = "webgl2")]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R32I;
#[cfg(feature = "webgl2")]
impl ImageFormat for R32I {
type P = [i32; 1];
const NUM_CHANNELS: usize = 1;
const FORMAT: u32 = WebGlRenderingCtx::RED_INTEGER;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::R32I as i32;
const TYPE: u32 = WebGlRenderingCtx::INT;
const CHANNEL_TYPE: ChannelType = ChannelType::R32I;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Int32Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
pub enum ChannelType {
RGBA32F,
RGB32F,
RGBA8U,
RGB8U,
R32F,
#[cfg(feature = "webgl2")]
R64F,
#[cfg(feature = "webgl2")]
R8UI,
#[cfg(feature = "webgl2")]
R16I,
#[cfg(feature = "webgl2")]
R32I,
}
impl ChannelType {
pub fn is_colored(&self) -> bool {
matches!(
self,
ChannelType::RGBA32F | ChannelType::RGB32F | ChannelType::RGBA8U | ChannelType::RGB8U
)
}
}
pub const NUM_CHANNELS: usize = 9;
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
pub struct ImageFormatType {
pub ext: ImageExt,
pub fmt: PixelType,
pub channel: ChannelType,
}
impl ImageFormatType {
@@ -11,11 +313,11 @@ impl ImageFormatType {
&self.ext
}
pub fn get_pixel_format(&self) -> PixelType {
self.fmt
pub fn get_channel(&self) -> ChannelType {
self.channel
}
pub fn is_colored(&self) -> bool {
!matches!(self.ext, ImageExt::Fits)
self.channel.is_colored()
}
}

View File

@@ -1,13 +1,13 @@
/* ------------------------------------------------------ */
#[derive(Debug)]
pub struct HTMLImage<F> {
pub image: web_sys::HtmlImageElement,
image: web_sys::HtmlImageElement,
format: std::marker::PhantomData<F>,
}
impl<F> HTMLImage<F>
where
F: TextureFormat + Clone,
F: ImageFormat + Clone,
{
pub fn new(image: web_sys::HtmlImageElement) -> Self {
Self {
@@ -15,20 +15,16 @@ where
format: std::marker::PhantomData,
}
}
pub fn element(&self) -> &web_sys::HtmlImageElement {
&self.image
}
}
use crate::image::format::ImageFormat;
use crate::image::Image;
use crate::texture::format::TextureFormat;
use crate::texture::Tex3D;
use cgmath::Vector3;
use wasm_bindgen::JsValue;
impl<F> Image for HTMLImage<F>
where
F: TextureFormat,
F: ImageFormat,
{
fn insert_into_3d_texture<T: Tex3D>(
&self,
@@ -47,7 +43,7 @@ where
Ok(())
}
fn get_size(&self) -> (u32, u32, u32) {
(self.image.width(), self.image.height(), 1)
fn get_size(&self) -> (u32, u32) {
(self.image.width(), self.image.height())
}
}

View File

@@ -6,9 +6,9 @@ pub mod html;
pub mod raw;
use crate::image::bitmap::Bitmap;
use crate::image::format::RGB8U;
use crate::image::format::RGBA8U;
use crate::image::raw::ImageBuffer;
use crate::texture::format::RGB8U;
use crate::texture::format::RGBA8U;
pub trait ArrayBuffer: AsRef<js_sys::Object> + std::fmt::Debug {
type Item: std::cmp::PartialOrd + Clone + Copy + std::fmt::Debug + cgmath::Zero;
@@ -179,7 +179,6 @@ impl ArrayBuffer for ArrayF64 {
}
use self::canvas::Canvas;
use self::fits::FitsImage;
use self::html::HTMLImage;
use wasm_bindgen::JsValue;
pub trait Image {
@@ -191,7 +190,7 @@ pub trait Image {
offset: &Vector3<i32>,
) -> Result<(), JsValue>;
fn get_size(&self) -> (u32, u32, u32);
fn get_size(&self) -> (u32, u32);
}
impl<I> Image for &I
@@ -211,14 +210,13 @@ where
Ok(())
}
#[inline]
fn get_size(&self) -> (u32, u32, u32) {
fn get_size(&self) -> (u32, u32) {
let image = &**self;
image.get_size()
}
}
use std::rc::Rc;
use std::{io::Cursor, rc::Rc};
impl<I> Image for Rc<I>
where
I: Image,
@@ -236,21 +234,23 @@ where
Ok(())
}
#[inline]
fn get_size(&self) -> (u32, u32, u32) {
fn get_size(&self) -> (u32, u32) {
let image = &**self;
image.get_size()
}
}
use crate::texture::format::{R16I, R32F, R32I, R8U};
use crate::texture::Tex3D;
#[cfg(feature = "webgl2")]
use crate::image::format::{R16I, R32I, R64F, R8UI};
use crate::{image::format::R32F, texture::Tex3D};
use fits::Fits;
#[derive(Debug)]
#[cfg(feature = "webgl2")]
pub enum ImageType {
FitsRawBytes {
FitsImage {
raw_bytes: js_sys::Uint8Array,
size: (u32, u32, u32),
size: (u32, u32),
},
Canvas {
canvas: Canvas<RGBA8U>,
@@ -283,7 +283,7 @@ pub enum ImageType {
image: ImageBuffer<R16I>,
},
RawR8ui {
image: ImageBuffer<R8U>,
image: ImageBuffer<R8UI>,
},
}
@@ -297,24 +297,25 @@ impl Image for ImageType {
offset: &Vector3<i32>,
) -> Result<(), JsValue> {
match self {
ImageType::FitsRawBytes {
ImageType::FitsImage {
raw_bytes: raw_bytes_buf,
..
} => {
let raw_bytes = raw_bytes_buf.to_vec();
let num_bytes = raw_bytes_buf.length() as usize;
let mut raw_bytes = vec![0; num_bytes];
raw_bytes_buf.copy_to(&mut raw_bytes[..]);
let images = FitsImage::from_raw_bytes(&raw_bytes)?;
for image in images {
image.insert_into_3d_texture(textures, offset)?
}
let mut bytes_reader = Cursor::new(raw_bytes.as_slice());
let fits_img = Fits::from_byte_slice(&mut bytes_reader)?;
fits_img.insert_into_3d_texture(textures, offset)?
}
ImageType::Canvas { canvas } => canvas.insert_into_3d_texture(textures, offset)?,
ImageType::ImageRgba8u { image } => image.insert_into_3d_texture(textures, offset)?,
ImageType::ImageRgb8u { image } => image.insert_into_3d_texture(textures, offset)?,
ImageType::HTMLImageRgba8u { image, .. } => {
ImageType::HTMLImageRgba8u { image } => {
image.insert_into_3d_texture(textures, offset)?
}
ImageType::HTMLImageRgb8u { image, .. } => {
ImageType::HTMLImageRgb8u { image } => {
image.insert_into_3d_texture(textures, offset)?
}
ImageType::RawRgb8u { image } => image.insert_into_3d_texture(textures, offset)?,
@@ -328,9 +329,9 @@ impl Image for ImageType {
Ok(())
}
fn get_size(&self) -> (u32, u32, u32) {
fn get_size(&self) -> (u32, u32) {
match self {
ImageType::FitsRawBytes { size, .. } => *size,
ImageType::FitsImage { size, .. } => *size,
ImageType::Canvas { canvas } => canvas.get_size(),
ImageType::ImageRgba8u { image } => image.get_size(),
ImageType::ImageRgb8u { image } => image.get_size(),

View File

@@ -1,18 +1,17 @@
use crate::texture::format::TextureFormat;
use crate::image::format::ImageFormat;
use crate::texture::pixel::Pixel;
use crate::texture::Tex3D;
#[derive(Debug)]
#[allow(dead_code)]
pub struct ImageBuffer<T>
where
T: TextureFormat,
T: ImageFormat,
{
pub data: Box<[<<T as TextureFormat>::P as Pixel>::Item]>,
pub size: (u32, u32, u32),
pub data: Vec<<<T as ImageFormat>::P as Pixel>::Item>,
pub size: Vector2<i32>,
}
use crate::texture::format::Bytes;
use crate::image::format::Bytes;
pub struct ImageBufferView {
pub x: i32,
@@ -23,25 +22,20 @@ pub struct ImageBufferView {
use wasm_bindgen::JsValue;
impl<T> ImageBuffer<T>
where
T: TextureFormat,
T: ImageFormat,
{
pub fn new(
data: Box<[<<T as TextureFormat>::P as Pixel>::Item]>,
width: u32,
height: u32,
depth: u32,
) -> Self {
let size_buf = width * height * depth * (T::NUM_CHANNELS as u32);
debug_assert!(size_buf == data.len() as u32);
pub fn new(data: Vec<<<T as ImageFormat>::P as Pixel>::Item>, width: i32, height: i32) -> Self {
let size_buf = width * height * (T::NUM_CHANNELS as i32);
debug_assert!(size_buf == data.len() as i32);
//let buf = <<T as ImageFormat>::P as Pixel>::Container::new(buf);
let size = (width, height, depth);
let size = Vector2::new(width, height);
Self { data, size }
}
pub fn from_encoded_raw_bytes(
raw_bytes: &[u8],
width: u32,
height: u32,
width: i32,
height: i32,
) -> Result<Self, JsValue> {
let mut decoded_bytes = match T::decode(raw_bytes).map_err(JsValue::from_str)? {
Bytes::Borrowed(bytes) => bytes.to_vec(),
@@ -50,39 +44,36 @@ where
let decoded_pixels = unsafe {
decoded_bytes.set_len(
decoded_bytes.len()
/ std::mem::size_of::<<<T as TextureFormat>::P as Pixel>::Item>(),
decoded_bytes.len() / std::mem::size_of::<<<T as ImageFormat>::P as Pixel>::Item>(),
);
std::mem::transmute::<Vec<u8>, Vec<<<T as TextureFormat>::P as Pixel>::Item>>(
std::mem::transmute::<Vec<u8>, Vec<<<T as ImageFormat>::P as Pixel>::Item>>(
decoded_bytes,
)
.into_boxed_slice()
};
Ok(Self::new(decoded_pixels, width, height, 1))
Ok(Self::new(decoded_pixels, width, height))
}
pub fn from_raw_bytes(mut raw_bytes: Vec<u8>, width: u32, height: u32) -> Self {
let size_buf = width * height * (std::mem::size_of::<T::P>() as u32);
debug_assert!(size_buf == raw_bytes.len() as u32);
pub fn from_raw_bytes(mut raw_bytes: Vec<u8>, width: i32, height: i32) -> Self {
let size_buf = width * height * (std::mem::size_of::<T::P>() as i32);
debug_assert!(size_buf == raw_bytes.len() as i32);
let decoded_pixels = unsafe {
raw_bytes.set_len(raw_bytes.len() / std::mem::size_of::<<T::P as Pixel>::Item>());
std::mem::transmute::<Vec<u8>, Vec<<T::P as Pixel>::Item>>(raw_bytes).into_boxed_slice()
raw_bytes.set_len(
raw_bytes.len() / std::mem::size_of::<<<T as ImageFormat>::P as Pixel>::Item>(),
);
std::mem::transmute::<Vec<u8>, Vec<<<T as ImageFormat>::P as Pixel>::Item>>(raw_bytes)
};
Self::new(decoded_pixels, width, height, 1)
Self::new(decoded_pixels, width, height)
}
pub fn empty() -> Self {
let size = (0, 0, 0);
Self {
data: Box::new([]),
size,
}
let size = Vector2::new(0, 0);
Self { data: vec![], size }
}
pub fn allocate(pixel_fill: &T::P, width: u32, height: u32) -> ImageBuffer<T> {
pub fn allocate(pixel_fill: &<T as ImageFormat>::P, width: i32, height: i32) -> ImageBuffer<T> {
let size_buf = ((width * height) as usize) * (T::NUM_CHANNELS);
let data = pixel_fill
@@ -91,10 +82,9 @@ where
.cloned()
.cycle()
.take(size_buf)
.collect::<Vec<_>>()
.into_boxed_slice();
.collect::<Vec<_>>();
ImageBuffer::<T>::new(data, width, height, 1)
ImageBuffer::<T>::new(data, width, height)
}
pub fn tex_sub(&mut self, src: &Self, s: &ImageBufferView, d: &ImageBufferView) {
@@ -103,8 +93,8 @@ where
for ix in s.x..(s.x + s.w) {
for iy in s.y..(s.y + s.h) {
let s_idx = ((iy * src.width() as i32) + ix) as usize;
let d_idx = ((di * self.width() as i32) + dj) as usize;
let s_idx = (iy * src.width() + ix) as usize;
let d_idx = (di * self.width() + dj) as usize;
for i in 0..T::NUM_CHANNELS {
let si = s_idx * T::NUM_CHANNELS + i;
@@ -122,38 +112,38 @@ where
}
}
pub fn iter(&self) -> impl Iterator<Item = &<T::P as Pixel>::Item> {
pub fn iter(&self) -> impl Iterator<Item = &<<T as ImageFormat>::P as Pixel>::Item> {
self.data.iter()
}
pub fn get_data(&self) -> &[<T::P as Pixel>::Item] {
pub fn get_data(&self) -> &[<<T as ImageFormat>::P as Pixel>::Item] {
&self.data
}
pub fn width(&self) -> u32 {
self.size.0
pub fn width(&self) -> i32 {
self.size.x
}
pub fn height(&self) -> u32 {
self.size.1
pub fn height(&self) -> i32 {
self.size.y
}
}
use crate::texture::format::{R16I, R32F, R32I, R8U, RGB8U, RGBA8U};
use crate::image::format::{R16I, R32F, R32I, R8UI, RGB8U, RGBA8U};
pub enum ImageBufferType {
JPG(ImageBuffer<RGB8U>),
PNG(ImageBuffer<RGBA8U>),
R32F(ImageBuffer<R32F>),
R8UI(ImageBuffer<R8U>),
R8UI(ImageBuffer<R8UI>),
R16I(ImageBuffer<R16I>),
R32I(ImageBuffer<R32I>),
}
use crate::image::{ArrayBuffer, Image};
use cgmath::Vector3;
use cgmath::{Vector2, Vector3};
impl<I> Image for ImageBuffer<I>
where
I: TextureFormat,
I: ImageFormat,
{
fn insert_into_3d_texture<T: Tex3D>(
&self,
@@ -162,14 +152,15 @@ where
// An offset to write the image in the texture array
offset: &Vector3<i32>,
) -> Result<(), JsValue> {
let js_array = <<I::P as Pixel>::Container as ArrayBuffer>::new(&self.data);
let js_array =
<<<I as ImageFormat>::P as Pixel>::Container as ArrayBuffer>::new(&self.data);
textures.tex_sub_image_3d_with_opt_array_buffer_view(
offset.x,
offset.y,
offset.z,
self.width() as i32,
self.height() as i32,
self.size.2 as i32,
self.width(),
self.height(),
1,
Some(js_array.as_ref()),
);
@@ -177,7 +168,7 @@ where
}
// The size of the image
fn get_size(&self) -> (u32, u32, u32) {
self.size
fn get_size(&self) -> (u32, u32) {
(self.size.x as u32, self.size.y as u32)
}
}

View File

@@ -1,8 +1,8 @@
extern crate futures;
//extern crate jpeg_decoder as jpeg;
//extern crate png;
extern crate jpeg_decoder as jpeg;
extern crate png;
extern crate serde_json;
//extern crate wasm_streams;
extern crate wasm_streams;
pub mod convert;
pub mod image;

View File

@@ -6,11 +6,6 @@ extern "C" {
pub fn log(s: &str);
}
#[macro_export]
macro_rules! al_print {
($($arg:tt)*) => { al_core::log(&format!("{:?}", $($arg),*)) };
}
// ----------------------------------------------------------------------------
// Helpers to hide some of the verbosity of web_sys

View File

@@ -2,7 +2,7 @@ use {wasm_bindgen::prelude::*, web_sys::WebGlFramebuffer};
use crate::webgl_ctx::WebGlRenderingCtx;
// Internal format used for the framebuffer final texture
use crate::texture::format::RGBA8U;
use crate::image::format::RGBA8U;
pub struct FrameBufferObject {
gl: WebGlContext,

View File

@@ -330,7 +330,6 @@ impl SendUniformsWithParams<Colormaps> for HiPSColor {
let cmap = cmaps.get(self.cmap_name.as_ref());
shader
.attach_uniforms_from(cmaps)
.attach_uniforms_with_params_from(cmap, cmaps)
.attach_uniform("H", &self.stretch)
.attach_uniform("min_value", &self.min_cut.unwrap_or(0.0))

View File

@@ -1,4 +1,4 @@
use crate::texture::format::TextureFormat;
use crate::image::format::ImageFormat;
use web_sys::HtmlCanvasElement;
use web_sys::WebGlTexture;
@@ -19,10 +19,11 @@ pub struct Texture3D {
texture: Option<WebGlTexture>,
metadata: Option<Rc<RefCell<Texture2DMeta>>>,
_depth: i32,
}
impl Texture3D {
pub fn create_empty<F: TextureFormat>(
pub fn create_empty<F: ImageFormat>(
gl: &WebGlContext,
// The weight of the individual textures
width: i32,
@@ -53,14 +54,16 @@ impl Texture3D {
let metadata = Some(Rc::new(RefCell::new(Texture2DMeta {
width: width as u32,
height: height as u32,
internal_format: F::INTERNAL_FORMAT,
format: F::FORMAT,
ty: F::TYPE,
pixel_type: F::PIXEL_TYPE,
channel_type: F::CHANNEL_TYPE,
})));
Ok(Texture3D {
texture,
gl: gl.clone(),
_depth: depth,
metadata,
})
}
@@ -69,7 +72,7 @@ impl Texture3D {
self.gl.generate_mipmap(WebGlRenderingCtx::TEXTURE_3D);
}
pub fn bind(&self) -> Texture3DBound<'_> {
pub fn bind(&self) -> Texture3DBound {
self.gl
.bind_texture(WebGlRenderingCtx::TEXTURE_3D, self.texture.as_ref());

View File

@@ -1,9 +1,9 @@
use crate::texture::format::PixelType;
use crate::texture::format::TextureFormat;
use crate::image::format::ImageFormat;
use web_sys::HtmlCanvasElement;
use web_sys::WebGlTexture;
use crate::texture::pixel::Pixel;
use crate::texture::ChannelType;
use crate::texture::Texture2DMeta;
use crate::webgl_ctx::WebGlContext;
use crate::webgl_ctx::WebGlRenderingCtx;
@@ -22,7 +22,7 @@ pub struct Texture2DArray {
}
impl Texture2DArray {
pub fn create_empty<F: TextureFormat>(
pub fn create_empty<F: ImageFormat>(
gl: &WebGlContext,
// The weight of the individual textures
width: i32,
@@ -53,9 +53,10 @@ impl Texture2DArray {
let metadata = Some(Rc::new(RefCell::new(Texture2DMeta {
width: width as u32,
height: height as u32,
pixel_type: F::PIXEL_TYPE,
ty: F::TYPE,
internal_format: F::INTERNAL_FORMAT,
format: F::FORMAT,
ty: F::TYPE,
channel_type: F::CHANNEL_TYPE,
})));
Ok(Texture2DArray {
@@ -70,7 +71,7 @@ impl Texture2DArray {
self.gl.generate_mipmap(WebGlRenderingCtx::TEXTURE_2D_ARRAY);
}
pub fn bind(&self) -> Texture2DArrayBound<'_> {
pub fn bind(&self) -> Texture2DArrayBound {
self.gl
.bind_texture(WebGlRenderingCtx::TEXTURE_2D_ARRAY, self.texture.as_ref());
@@ -115,31 +116,37 @@ impl Texture2DArray {
self.gl
.viewport(0, 0, metadata.width as i32, metadata.height as i32);
let value = match metadata.pixel_type {
PixelType::R8U => {
#[cfg(feature = "webgl2")]
let value = match metadata.channel_type {
ChannelType::R8UI => {
let p = <[u8; 1]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::R16I => {
ChannelType::R16I => {
let p = <[i16; 1]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::R32I => {
ChannelType::R32I => {
let p = <[i32; 1]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::R32F => {
ChannelType::R32F => {
let p = <[f32; 1]>::read_pixel(&self.gl, x, y)?;
crate::log(&format!("{:?}", p));
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::RGB8U => {
ChannelType::RGB8U => {
let p = <[u8; 3]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p)?)
}
PixelType::RGBA8U => {
ChannelType::RGBA8U => {
let p = <[u8; 4]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p)?)
}
_ => Err(JsValue::from_str(
"Pixel retrieval not implemented for that texture format.",
)),
};
// Unbind the framebuffer

View File

@@ -1,207 +0,0 @@
use crate::texture::pixel::Pixel;
pub type Bytes<'a> = std::borrow::Cow<'a, [u8]>;
pub trait TextureFormat {
type P: Pixel;
type ArrayBufferView: AsRef<js_sys::Object>;
const NUM_CHANNELS: usize;
const FORMAT: u32;
const INTERNAL_FORMAT: i32;
const TYPE: u32;
const PIXEL_TYPE: PixelType;
/// Creates a JS typed array which is a view into wasm's linear memory at the slice specified.
/// This function returns a new typed array which is a view into wasm's memory. This view does not copy the underlying data.
///
/// # Safety
///
/// Views into WebAssembly memory are only valid so long as the backing buffer isn't resized in JS. Once this function is called any future calls to Box::new (or malloc of any form) may cause the returned value here to be invalidated. Use with caution!
///
/// Additionally the returned object can be safely mutated but the input slice isn't guaranteed to be mutable.
///
/// Finally, the returned object is disconnected from the input slice's lifetime, so there's no guarantee that the data is read at the right time.
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str>;
}
use crate::webgl_ctx::WebGlRenderingCtx;
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct RGB8U;
impl TextureFormat for RGB8U {
type P = [u8; 3];
const NUM_CHANNELS: usize = 3;
const FORMAT: u32 = WebGlRenderingCtx::RGB;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGB8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const PIXEL_TYPE: PixelType = PixelType::RGB8U;
fn decode(_raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
todo!()
/*let mut decoder = jpeg::Decoder::new(raw_bytes);
let bytes = decoder
.decode()
.map_err(|_| "Cannot decoder jpeg. This image may not be compressed.")?;
Ok(Bytes::Owned(bytes))*/
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct RGBA8U;
impl TextureFormat for RGBA8U {
type P = [u8; 4];
const NUM_CHANNELS: usize = 4;
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const PIXEL_TYPE: PixelType = PixelType::RGBA8U;
fn decode(_raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
/*let mut decoder = jpeg::Decoder::new(raw_bytes);
let bytes = decoder
.decode()
.map_err(|_| "Cannot decoder png. This image may not be compressed.")?;
Ok(Bytes::Owned(bytes))
*/
todo!()
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R32F;
impl TextureFormat for R32F {
type P = [u8; 4];
const NUM_CHANNELS: usize = 4;
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const PIXEL_TYPE: PixelType = PixelType::R32F;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R8U;
impl TextureFormat for R8U {
type P = [u8; 1];
const FORMAT: u32 = WebGlRenderingCtx::RED;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::R8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const NUM_CHANNELS: usize = 1;
const PIXEL_TYPE: PixelType = PixelType::R8U;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R16I;
impl TextureFormat for R16I {
type P = [u8; 2];
const NUM_CHANNELS: usize = 2;
const FORMAT: u32 = WebGlRenderingCtx::RG;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RG8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const PIXEL_TYPE: PixelType = PixelType::R16I;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct R32I;
impl TextureFormat for R32I {
type P = [u8; 4];
const FORMAT: u32 = WebGlRenderingCtx::RGBA;
const INTERNAL_FORMAT: i32 = WebGlRenderingCtx::RGBA8 as i32;
const TYPE: u32 = WebGlRenderingCtx::UNSIGNED_BYTE;
const NUM_CHANNELS: usize = 4;
const PIXEL_TYPE: PixelType = PixelType::R32I;
fn decode(raw_bytes: &[u8]) -> Result<Bytes<'_>, &'static str> {
Ok(Bytes::Borrowed(raw_bytes))
}
type ArrayBufferView = js_sys::Uint8Array;
unsafe fn view(s: &[<Self::P as Pixel>::Item]) -> Self::ArrayBufferView {
Self::ArrayBufferView::view(s)
}
}
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
pub enum PixelType {
R8U,
R16I,
R32I,
R32F,
RGB8U,
RGBA8U,
}
impl PixelType {
pub const fn num_channels(&self) -> usize {
match self {
Self::RGB8U => 3,
Self::RGBA8U => 4,
_ => 1,
}
}
}
pub const NUM_CHANNELS: usize = 6;

View File

@@ -1,7 +1,6 @@
pub mod array;
pub use array::Texture2DArray;
pub mod format;
pub mod pixel;
pub use pixel::*;
@@ -12,7 +11,7 @@ pub use mod_3d::Texture3D;
use web_sys::HtmlCanvasElement;
use web_sys::WebGlTexture;
use crate::texture::format::PixelType;
use crate::image::format::ChannelType;
use crate::webgl_ctx::WebGlContext;
use crate::webgl_ctx::WebGlRenderingCtx;
use wasm_bindgen::prelude::*;
@@ -25,8 +24,9 @@ pub static mut CUR_IDX_TEX_UNIT: u8 = 0;
#[allow(dead_code)]
pub struct Texture2DMeta {
pub format: u32,
pub internal_format: i32,
pub ty: u32,
pub pixel_type: PixelType,
pub channel_type: ChannelType,
pub width: u32,
pub height: u32,
@@ -47,13 +47,13 @@ pub enum SamplerType {
Unsigned,
}
use crate::texture::format::TextureFormat;
use crate::image::format::ImageFormat;
//use super::pixel::PixelType;
use std::cell::RefCell;
use std::path::Path;
use std::rc::Rc;
impl Texture2D {
pub fn create_from_path<P: AsRef<Path>, F: TextureFormat>(
pub fn create_from_path<P: AsRef<Path>, F: ImageFormat>(
gl: &WebGlContext,
name: &'static str,
src: &P,
@@ -61,11 +61,12 @@ impl Texture2D {
) -> Result<Texture2D, JsValue> {
let image = HtmlImageElement::new().unwrap_abort();
#[cfg(feature = "webgl2")]
let texture = gl.create_texture();
let onerror = {
Closure::wrap(Box::new(move || {
println!("Cannot load texture located at: {name:?}");
println!("Cannot load texture located at: {:?}", name);
}) as Box<dyn Fn()>)
};
@@ -75,11 +76,13 @@ impl Texture2D {
let metadata = Rc::new(RefCell::new(Texture2DMeta {
width,
height,
internal_format: F::INTERNAL_FORMAT,
format: F::FORMAT,
ty: F::TYPE,
pixel_type: F::PIXEL_TYPE,
channel_type: F::CHANNEL_TYPE,
}));
#[cfg(feature = "webgl2")]
let onload = {
let image = image.clone();
let gl = gl.clone();
@@ -129,6 +132,7 @@ impl Texture2D {
let gl = gl.clone();
Ok(Texture2D {
#[cfg(feature = "webgl2")]
texture,
gl,
@@ -137,7 +141,7 @@ impl Texture2D {
})
}
pub fn create_from_raw_pixels<F: TextureFormat>(
pub fn create_from_raw_pixels<F: ImageFormat>(
gl: &WebGlContext,
width: i32,
height: i32,
@@ -162,12 +166,12 @@ impl Texture2D {
Ok(texture)
}
pub fn create_from_raw_bytes<F: TextureFormat>(
pub fn create_from_raw_bytes<F: ImageFormat>(
gl: &WebGlContext,
width: i32,
height: i32,
tex_params: &'static [(u32, u32)],
bytes: &[u8],
bytes: Option<&[u8]>,
) -> Result<Texture2D, JsValue> {
let texture = gl.create_texture();
@@ -184,14 +188,7 @@ impl Texture2D {
width,
height,
);
let view = unsafe {
let len = bytes.len() / (std::mem::size_of::<<F::P as Pixel>::Item>());
let pixels =
std::slice::from_raw_parts(bytes.as_ptr() as *const <F::P as Pixel>::Item, len);
F::view(pixels)
};
gl.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_array_buffer_view(
gl.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_u8_array(
WebGlRenderingCtx::TEXTURE_2D,
0,
0,
@@ -200,7 +197,7 @@ impl Texture2D {
height,
F::FORMAT,
F::TYPE,
Some(view.as_ref()),
bytes,
)
.expect("Texture 2D");
@@ -208,9 +205,10 @@ impl Texture2D {
let metadata = Some(Rc::new(RefCell::new(Texture2DMeta {
width: width as u32,
height: height as u32,
internal_format: F::INTERNAL_FORMAT,
format: F::FORMAT,
ty: F::TYPE,
pixel_type: F::PIXEL_TYPE,
channel_type: F::CHANNEL_TYPE,
})));
Ok(Texture2D {
@@ -222,7 +220,7 @@ impl Texture2D {
})
}
pub fn create_empty_with_format<F: TextureFormat>(
pub fn create_empty_with_format<F: ImageFormat>(
gl: &WebGlContext,
width: i32,
height: i32,
@@ -248,14 +246,16 @@ impl Texture2D {
let metadata = Some(Rc::new(RefCell::new(Texture2DMeta {
width: width as u32,
height: height as u32,
internal_format: F::INTERNAL_FORMAT,
format: F::FORMAT,
ty: F::TYPE,
pixel_type: F::PIXEL_TYPE,
channel_type: F::CHANNEL_TYPE,
})));
Ok(Texture2D {
texture,
gl,
metadata,
})
}
@@ -295,7 +295,7 @@ impl Texture2D {
self
}
pub fn bind(&self) -> Texture2DBound<'_> {
pub fn bind(&self) -> Texture2DBound {
self.gl
.bind_texture(WebGlRenderingCtx::TEXTURE_2D, self.texture.as_ref());
@@ -335,31 +335,37 @@ impl Texture2D {
self.gl
.viewport(0, 0, metadata.width as i32, metadata.height as i32);
let value = match metadata.pixel_type {
PixelType::R8U => {
#[cfg(feature = "webgl2")]
let value = match metadata.channel_type {
ChannelType::R8UI => {
let p = <[u8; 1]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::R16I => {
ChannelType::R16I => {
let p = <[i16; 1]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::R32I => {
ChannelType::R32I => {
let p = <[i32; 1]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::R32F => {
ChannelType::R32F => {
let p = <[f32; 1]>::read_pixel(&self.gl, x, y)?;
crate::log(&format!("{:?}", p));
Ok(serde_wasm_bindgen::to_value(&p[0])?)
}
PixelType::RGB8U => {
ChannelType::RGB8U => {
let p = <[u8; 3]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p)?)
}
PixelType::RGBA8U => {
ChannelType::RGBA8U => {
let p = <[u8; 4]>::read_pixel(&self.gl, x, y)?;
Ok(serde_wasm_bindgen::to_value(&p)?)
}
_ => Err(JsValue::from_str(
"Pixel retrieval not implemented for that texture format.",
)),
};
// Unbind the framebuffer

View File

@@ -21,6 +21,70 @@ pub trait Pixel:
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue>;
}
impl Pixel for [f32; 4] {
type Item = f32;
type Container = ArrayF32;
const BLACK: Self = [f32::NAN; 4];
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
let pixels = js_sys::Float32Array::new_with_length(4);
#[cfg(feature = "webgl2")]
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RGBA32F,
WebGlRenderingCtx::FLOAT,
Some(&pixels),
)?;
#[cfg(feature = "webgl1")]
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RGBA,
WebGlRenderingCtx::FLOAT,
Some(&pixels),
)?;
let pixels = pixels.to_vec();
Ok([pixels[0], pixels[1], pixels[2], pixels[3]])
}
}
impl Pixel for [f32; 3] {
type Item = f32;
type Container = ArrayF32;
const BLACK: Self = [f32::NAN; 3];
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
let pixels = js_sys::Float32Array::new_with_length(3);
#[cfg(feature = "webgl2")]
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RGB32F,
WebGlRenderingCtx::FLOAT,
Some(&pixels),
)?;
#[cfg(feature = "webgl1")]
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RGB,
WebGlRenderingCtx::FLOAT,
Some(&pixels),
)?;
let pixels = pixels.to_vec();
Ok([pixels[0], pixels[1], pixels[2]])
}
}
impl Pixel for [f32; 1] {
type Item = f32;
type Container = ArrayF32;
@@ -46,7 +110,38 @@ impl Pixel for [f32; 1] {
])])
}
}
/*use crate::image::ArrayF64;
impl Pixel for [f64; 1] {
type Item = f64;
type Container = ArrayF64;
const BLACK: Self = [std::f64::NAN];
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
let pixels = js_sys::Float32Array::new_with_length(1);
#[cfg(feature = "webgl2")]
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RED,
WebGlRenderingCtx::FLOAT,
Some(&pixels),
)?;
#[cfg(feature = "webgl1")]
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::LUMINANCE_ALPHA,
WebGlRenderingCtx::FLOAT,
Some(&pixels),
)?;
Ok([pixels.to_vec()[0] as f64])
}
}*/
impl Pixel for [u8; 4] {
type Item = u8;
type Container = ArrayU8;
@@ -88,27 +183,7 @@ impl Pixel for [u8; 3] {
Ok([pixels[0], pixels[1], pixels[2]])
}
}
impl Pixel for [u8; 2] {
type Item = u8;
type Container = ArrayU8;
const BLACK: Self = [0, 0];
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
let pixels = js_sys::Uint8Array::new_with_length(2);
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RG,
WebGlRenderingCtx::UNSIGNED_BYTE,
Some(&pixels),
)?;
let pixels = pixels.to_vec();
Ok([pixels[0], pixels[1]])
}
}
#[cfg(feature = "webgl2")]
impl Pixel for [u8; 1] {
type Item = u8;
type Container = ArrayU8;
@@ -129,50 +204,45 @@ impl Pixel for [u8; 1] {
Ok([pixels.to_vec()[0]])
}
}
#[cfg(feature = "webgl2")]
impl Pixel for [i16; 1] {
type Item = i16;
type Container = ArrayI16;
const BLACK: Self = [i16::MIN];
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
let p = js_sys::Uint8Array::new_with_length(2);
let pixels = js_sys::Int16Array::new_with_length(1);
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RG,
WebGlRenderingCtx::UNSIGNED_BYTE,
Some(&p),
WebGlRenderingCtx::RED_INTEGER,
WebGlRenderingCtx::SHORT,
Some(&pixels),
)?;
Ok([i16::from_le_bytes([p.at(0).unwrap(), p.at(1).unwrap()])])
Ok([pixels.to_vec()[0]])
}
}
#[cfg(feature = "webgl2")]
impl Pixel for [i32; 1] {
type Item = i32;
type Container = ArrayI32;
const BLACK: Self = [i32::MIN];
fn read_pixel(gl: &WebGlContext, x: i32, y: i32) -> Result<Self, JsValue> {
let p = js_sys::Uint8Array::new_with_length(4);
let pixels = js_sys::Int32Array::new_with_length(1);
gl.read_pixels_with_opt_array_buffer_view(
x,
y,
1,
1,
WebGlRenderingCtx::RGBA,
WebGlRenderingCtx::UNSIGNED_BYTE,
Some(&p),
WebGlRenderingCtx::RED_INTEGER,
WebGlRenderingCtx::INT,
Some(&pixels),
)?;
Ok([i32::from_le_bytes([
p.at(0).unwrap(),
p.at(1).unwrap(),
p.at(2).unwrap(),
p.at(3).unwrap(),
])])
Ok([pixels.to_vec()[0]])
}
}

View File

@@ -4,7 +4,10 @@ use wasm_bindgen::JsCast;
use wasm_bindgen::JsValue;
use web_sys::HtmlElement;
#[cfg(feature = "webgl2")]
pub type WebGlRenderingCtx = web_sys::WebGl2RenderingContext;
#[cfg(feature = "webgl1")]
pub type WebGlRenderingCtx = web_sys::WebGlRenderingContext;
#[derive(Clone)]
pub struct WebGlContext {

View File

@@ -31,7 +31,8 @@ fn generate_shaders() -> std::result::Result<(), Box<dyn Error>> {
let src = read_shader(path)?;
shaders.insert(out_file_name, src);
println!("cargo:rerun-if-changed=src/shaders/{file_name}");
//fs::write(&out_name, result)?;
println!("cargo:rerun-if-changed=src/shaders/{}", file_name);
}
}
}
@@ -49,20 +50,15 @@ fn read_shader<P: AsRef<std::path::Path>>(path: P) -> std::io::Result<String> {
let shader_src = std::io::BufReader::new(file)
.lines()
.map_while(Result::ok)
.filter_map(|l| {
.map(|l| {
if l.starts_with("#include") {
let incl_file_names: Vec<_> = l.split_terminator(&[';', ' '][..]).collect();
let incl_file_name_rel = incl_file_names[1];
let incl_file_name = path.parent().unwrap().join(incl_file_name_rel);
println!("{}", incl_file_name.to_string_lossy());
Some(read_shader(incl_file_name.to_str().unwrap()).unwrap())
} else if l.trim_start().starts_with("//") {
// comment
None
read_shader(incl_file_name.to_str().unwrap()).unwrap()
} else {
Some(l)
l
}
})
.collect::<Vec<_>>()

File diff suppressed because it is too large Load Diff

View File

@@ -1,25 +0,0 @@
use js_sys::Reflect;
use wasm_bindgen::JsValue;
use web_sys::window;
pub struct BrowserFeaturesSupport {
pub create_image_bitmap: bool,
}
impl Default for BrowserFeaturesSupport {
fn default() -> Self {
Self::new()
}
}
impl BrowserFeaturesSupport {
pub fn new() -> Self {
let window = window().expect("no global `window` exists");
let create_image_bitmap =
Reflect::has(&window, &JsValue::from_str("createImageBitmap")).unwrap_or(false);
Self {
create_image_bitmap,
}
}
}

View File

@@ -54,8 +54,8 @@ fn linspace(a: f64, b: f64, num: usize) -> Vec<f64> {
res
}
const NUM_VERTICES_WIDTH: usize = 4;
const NUM_VERTICES_HEIGHT: usize = 4;
const NUM_VERTICES_WIDTH: usize = 3;
const NUM_VERTICES_HEIGHT: usize = 3;
const NUM_VERTICES: usize = 4 + 2 * NUM_VERTICES_WIDTH + 2 * NUM_VERTICES_HEIGHT;
// This struct belongs to the CameraViewPort
pub struct FieldOfView {

View File

@@ -8,8 +8,8 @@ pub use fov::FieldOfView;
pub mod view_hpx_cells;
use crate::CooSystem;
use crate::HEALPixCoverage;
use crate::ProjectionType;
use crate::SpaceMoc;
pub fn build_fov_coverage(
depth: u8,
@@ -18,7 +18,7 @@ pub fn build_fov_coverage(
camera_frame: CooSystem,
frame: CooSystem,
proj: &ProjectionType,
) -> SpaceMoc {
) -> HEALPixCoverage {
if let Some(vertices) = fov.get_vertices() {
// The vertices coming from the camera are in a specific coo sys
// but cdshealpix accepts them to be given in ICRS coo sys
@@ -44,20 +44,20 @@ pub fn build_fov_coverage(
::healpix::nested::hash(depth, lon.to_radians(), lat.to_radians())
});
SpaceMoc::from_fixed_hpx_cells(depth, hpx_idxs_iter, Some(vertices.len()))
HEALPixCoverage::from_fixed_hpx_cells(depth, hpx_idxs_iter, Some(vertices.len()))
} else {
// The polygon is not too small for the depth asked
let inside_vertex = crate::coosys::apply_coo_system(camera_frame, frame, camera_center);
// Prefer to query from_polygon with depth >= 2
SpaceMoc::from_3d_coos(depth, vertices_iter, &inside_vertex)
HEALPixCoverage::from_3d_coos(depth, vertices_iter, &inside_vertex)
}
} else {
let center_xyz = crate::coosys::apply_coo_system(camera_frame, frame, camera_center);
let biggest_fov_rad = proj.aperture_start().to_radians();
let lonlat = center_xyz.lonlat();
SpaceMoc::from_cone(&lonlat, biggest_fov_rad * 0.5, depth)
HEALPixCoverage::from_cone(&lonlat, biggest_fov_rad * 0.5, depth)
}
}

View File

@@ -3,7 +3,7 @@ use crate::healpix::cell::HEALPixCell;
use crate::math::projection::*;
use crate::SpaceMoc;
use crate::HEALPixCoverage;
use moclib::moc::{range::op::degrade::degrade, RangeMOCIterator};
@@ -84,7 +84,7 @@ impl ViewHpxCells {
self.hpx_cells[frame as usize].get_cells(depth)
}
pub(super) fn get_cov(&self, frame: CooSystem) -> &SpaceMoc {
pub(super) fn get_cov(&self, frame: CooSystem) -> &HEALPixCoverage {
self.hpx_cells[frame as usize].get_cov()
}
@@ -109,7 +109,7 @@ pub struct HpxCells {
// An index vector referring to the indices of each depth cells
//idx_rng: [Option<Range<usize>>; MAX_HPX_DEPTH as usize + 1],
// Coverage created in the frame
cov: SpaceMoc,
cov: HEALPixCoverage,
// boolean refering to if the cells in the view has changed
//new_cells: bool,
}
@@ -127,7 +127,7 @@ use super::FieldOfView;
impl HpxCells {
pub fn new(frame: CooSystem) -> Self {
//let cells = Vec::new();
let cov = SpaceMoc::empty(29);
let cov = HEALPixCoverage::empty(29);
//let idx_rng = Default::default();
@@ -203,7 +203,7 @@ impl HpxCells {
if depth == cov_depth {
self.cov
.flatten_to_fixed_depth_cells()
.map(|idx| HEALPixCell(depth, idx))
.map(move |idx| HEALPixCell(depth, idx))
.collect()
} else if depth > self.cov.depth_max() {
let cov_d = self.cov.depth_max();
@@ -212,7 +212,7 @@ impl HpxCells {
self.cov
.flatten_to_fixed_depth_cells()
.flat_map(|idx| {
.flat_map(move |idx| {
// idx is at depth_max
HEALPixCell(cov_d, idx).get_children_cells(dd)
})
@@ -221,7 +221,7 @@ impl HpxCells {
// compute the cells from the coverage
degrade((&self.cov.0).into_range_moc_iter(), depth)
.flatten_to_fixed_depth_cells()
.map(|idx| HEALPixCell(depth, idx))
.map(move |idx| HEALPixCell(depth, idx))
.collect()
}
}
@@ -257,7 +257,7 @@ impl HpxCells {
}*/
#[inline(always)]
pub fn get_cov(&self) -> &SpaceMoc {
pub fn get_cov(&self) -> &HEALPixCoverage {
&self.cov
}

View File

@@ -12,7 +12,7 @@ const ID_R: &Matrix3<f64> = &Matrix3::new(-1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.
use super::{fov::FieldOfView, view_hpx_cells::ViewHpxCells};
use crate::healpix::cell::HEALPixCell;
use crate::healpix::moc::SpaceMoc;
use crate::healpix::coverage::HEALPixCoverage;
use crate::math::angle::ToAngle;
use crate::math::{projection::coo_space::XYZModel, projection::domain::sdf::ProjDef};
use cgmath::{InnerSpace, Vector3};
@@ -216,7 +216,7 @@ impl CameraViewPort {
self.view_hpx_cells.has_changed()
}*/
pub fn get_cov(&self, frame: CooSystem) -> &SpaceMoc {
pub fn get_cov(&self, frame: CooSystem) -> &HEALPixCoverage {
self.view_hpx_cells.get_cov(frame)
}

View File

@@ -10,13 +10,13 @@ pub struct Downloader {
requests: Vec<RequestType>,
queried_list: HashSet<QueryId>,
cache: Cache<QueryId, RequestType>,
cache: Cache<QueryId, Resource>,
}
use crate::fifo_cache::Cache;
use query::Query;
use request::RequestType;
use request::{RequestType, Resource};
impl Default for Downloader {
fn default() -> Self {
@@ -62,23 +62,26 @@ impl Downloader {
}
}
pub fn get_received_resources(&mut self) -> Vec<RequestType> {
pub fn get_received_resources(&mut self) -> Vec<Resource> {
let mut rscs = vec![];
let mut not_finished_requests = vec![];
let mut finished_query_list = vec![];
self.requests = self
.requests
.drain(..)
.filter(|request| {
// If the request resolves into a resource
if let Some(rsc) = request.into() {
rscs.push(rsc);
finished_query_list.push(request.id().clone());
while let Some(request) = self.requests.pop() {
if request.is_resolved() {
finished_query_list.push(request.id().clone());
rscs.push(request);
// The request is not resolved, we keep it
} else {
not_finished_requests.push(request);
}
}
self.requests = not_finished_requests;
false
// The request is not resolved, we keep it
} else {
true
}
})
.collect();
for query_id in finished_query_list.into_iter() {
self.queried_list.remove(&query_id);
@@ -95,10 +98,16 @@ impl Downloader {
self.queried_list.contains(id)
}
pub fn delay(&mut self, r: RequestType) {
pub fn delay(&mut self, r: Resource) {
match r {
RequestType::Tile(tile) => {
self.cache.insert(tile.id.clone(), RequestType::Tile(tile));
Resource::Tile(tile) => {
let k = format!(
"{:?}{:?}/{:?}",
tile.get_hips_cdid(),
tile.cell.depth(),
tile.cell.idx()
);
self.cache.insert(k, Resource::Tile(tile));
}
_ => unimplemented!(),
}

View File

@@ -6,87 +6,32 @@ pub trait Query: Sized {
fn id(&self) -> &QueryId;
}
pub type QueryId = String;
use crate::browser_support::BrowserFeaturesSupport;
use crate::healpix::cell::HEALPixFreqCell;
use al_api::hips::DataproductType;
use al_core::image::format::ImageFormatType;
/// Description of a cell to query
#[derive(Clone, PartialEq, Eq)]
pub enum CellDesc {
HiPS2D {
// A description of the tile in space
cell: HEALPixCell,
// Size of the tile requested
tile_size: u32,
},
HiPS3D {
// A description of the tile in space and frequency
cell: HEALPixFreqCell,
// Size of the tile requested
tile_size: u32,
// Depth of the cubic tile
tile_depth: u32,
},
HiPSCube {
// A description of the tile in space
cell: HEALPixCell,
// size of the tile requested
tile_size: u32,
// The channel number to query
channel: u32,
},
}
impl CellDesc {
/*fn get_size(&self) -> (u32, u32, u32) {
match self {
Self::HiPS2D { tile_size, .. } => (*tile_size, *tile_size, 1),
Self::HiPSCube { tile_size, .. } => (*tile_size, *tile_size, 1),
Self::HiPS3D {
tile_size,
tile_depth,
..
} => (*tile_size, *tile_size, *tile_depth),
}
}*/
pub fn get_hpx(&self) -> &HEALPixCell {
match self {
Self::HiPS2D { cell, .. } => cell,
Self::HiPS3D { cell, .. } => &cell.hpx,
Self::HiPSCube { cell, .. } => cell,
}
}
}
#[derive(Eq, PartialEq, Clone)]
pub struct Tile {
pub cell: CellDesc,
pub cell: HEALPixCell,
pub format: ImageFormatType,
// The root url of the HiPS
pub hips_cdid: CreatorDid,
// The total url of the query
pub url: Url,
pub size: u32, // size of the tile requested
pub credentials: RequestCredentials,
pub mode: RequestMode,
pub id: QueryId,
pub create_bitmap_support: bool,
pub channel: Option<u32>,
}
use crate::healpix::cell::HEALPixCell;
use crate::renderable::hips::config::HiPSConfig;
use crate::renderable::CreatorDid;
use crate::tile_fetcher::HiPSLocalFiles;
use web_sys::{RequestCredentials, RequestMode};
impl Tile {
pub fn new(
cell: &HEALPixCell,
cfg: &HiPSConfig,
browser_support: &BrowserFeaturesSupport,
) -> Self {
pub fn new(cell: &HEALPixCell, channel: Option<u32>, cfg: &HiPSConfig) -> Self {
let hips_cdid = cfg.get_creator_did();
let hips_url = cfg.get_root_url();
let format = cfg.get_format();
@@ -99,108 +44,38 @@ impl Tile {
let dir_idx = (idx / 10000) * 10000;
let url = format!("{hips_url}/Norder{depth}/Dir{dir_idx}/Npix{idx}.{ext}");
let mut url = format!("{}/Norder{}/Dir{}/Npix{}", hips_url, depth, dir_idx, idx);
let id = format!("{}_{}_{}_{}", hips_cdid, depth, idx, ext);
let tile_size = cfg.get_tile_size() as u32;
Tile {
hips_cdid: hips_cdid.to_string(),
url,
cell: CellDesc::HiPS2D {
cell: *cell,
tile_size,
},
format,
credentials,
mode,
id,
create_bitmap_support: browser_support.create_image_bitmap,
// handle cube case
if let Some(channel) = channel {
if channel > 0 {
url.push_str(&format!("_{:?}", channel));
}
}
}
pub fn new_with_channel(
cell: &HEALPixCell,
channel: u32,
cfg: &HiPSConfig,
browser_support: &BrowserFeaturesSupport,
) -> Self {
let hips_cdid = cfg.get_creator_did();
let hips_url = cfg.get_root_url();
let format = cfg.get_format();
let credentials = cfg.get_request_credentials();
let mode = cfg.get_request_mode();
// add the tile format
url.push_str(&format!(".{}", ext));
let ext = format.get_ext_file();
let id = format!(
"{}{}{}{}{}",
hips_cdid,
depth,
idx,
channel.unwrap_or(0),
ext
);
let HEALPixCell(depth, idx) = *cell;
let dir_idx = (idx / 10000) * 10000;
let url = format!("{hips_url}/Norder{depth}/Dir{dir_idx}/Npix{idx}_{channel:?}.{ext}");
let id = format!("{}_{}_{}_{}_{}", hips_cdid, depth, idx, channel, ext);
let tile_size = cfg.get_tile_size() as u32;
let size = cfg.get_tile_size();
Tile {
hips_cdid: hips_cdid.to_string(),
url,
cell: CellDesc::HiPSCube {
cell: *cell,
tile_size,
channel,
},
cell: *cell,
format,
credentials,
mode,
id,
create_bitmap_support: browser_support.create_image_bitmap,
}
}
pub fn new_cubic(
hpx_f_cell: &HEALPixFreqCell,
cfg: &HiPSConfig,
browser_support: &BrowserFeaturesSupport,
) -> Self {
let hips_cdid = cfg.get_creator_did();
let hips_url = cfg.get_root_url();
let format = cfg.get_format();
let credentials = cfg.get_request_credentials();
let mode = cfg.get_request_mode();
let ext = format.get_ext_file();
// f hash at order_f
let HEALPixFreqCell {
hpx: HEALPixCell(k, n),
f_hash: m,
f_depth: l,
} = *hpx_f_cell;
let d = (n / 10000) * 10000;
let e = (m / 10) * 10;
let url = format!("{hips_url}/Norder{k}_{l}/Dir{d}_{e}/Npix{n}_{m}.{ext}");
let id = format!("{hips_cdid}_{k}_{l}_{n}_{m}_{ext}");
let tile_size = cfg.get_tile_size() as u32;
let tile_depth = cfg.tile_depth.unwrap_or(1) as u32;
Tile {
hips_cdid: hips_cdid.to_string(),
url,
cell: CellDesc::HiPS3D {
cell: hpx_f_cell.clone(),
tile_size,
tile_depth,
},
format,
credentials,
mode,
id,
create_bitmap_support: browser_support.create_image_bitmap,
channel,
size: size as u32,
}
}
}
@@ -246,12 +121,12 @@ impl Allsky {
// handle cube case
if let Some(channel) = channel {
if channel > 0 {
url.push_str(&format!("_{channel:?}"));
url.push_str(&format!("_{:?}", channel));
}
}
// add the tile format
url.push_str(&format!(".{ext}"));
url.push_str(&format!(".{}", ext));
let id = format!(
"{}Allsky{}{}",
@@ -284,8 +159,43 @@ impl Query for Allsky {
}
/* ---------------------------------- */
use al_api::moc::MOCOptions;
pub struct PixelMetadata {
pub format: ImageFormatType,
// The root url of the HiPS
pub hips_cdid: CreatorDid,
// The total url of the query
pub url: Url,
pub id: QueryId,
}
impl PixelMetadata {
pub fn new(cfg: &HiPSConfig) -> Self {
let hips_cdid = cfg.get_creator_did().to_string();
let format = cfg.get_format();
let ext = format.get_ext_file();
let url = format!("{}/Norder3/Allsky.{}", cfg.get_root_url(), ext);
let id = format!("{}Allsky{}", hips_cdid, ext);
PixelMetadata {
hips_cdid,
url,
format,
id,
}
}
}
use super::request::blank::PixelMetadataRequest;
impl Query for PixelMetadata {
type Request = PixelMetadataRequest;
fn id(&self) -> &QueryId {
&self.id
}
}
use al_api::moc::MOCOptions;
/* ---------------------------------- */
pub struct Moc {
// The total url of the query
pub url: Url,
@@ -293,41 +203,21 @@ pub struct Moc {
pub credentials: RequestCredentials,
pub params: MOCOptions,
pub hips_cdid: CreatorDid,
pub dataproduct_type: DataproductType,
}
use std::collections::HashMap;
impl Moc {
pub fn new(
cfg: &HiPSConfig,
hips_local_files: &HashMap<String, HiPSLocalFiles>,
url: String,
mode: RequestMode,
credentials: RequestCredentials,
hips_cdid: CreatorDid,
params: MOCOptions,
) -> Self {
// Try to fetch the MOC
let hips_cdid = cfg.get_creator_did();
let url = if let Some(local_hips) = hips_local_files.get(hips_cdid) {
if let Ok(url) =
web_sys::Url::create_object_url_with_blob(local_hips.get_moc().as_ref())
{
url
} else {
format!("{}/Moc.fits", cfg.get_root_url())
}
} else {
format!("{}/Moc.fits", cfg.get_root_url())
};
let mode = cfg.get_request_mode();
let credentials = cfg.get_request_credentials();
let hips_cdid = cfg.get_creator_did().to_string();
let dataproduct_type = cfg.dataproduct_type;
Moc {
url,
params,
hips_cdid,
mode,
credentials,
dataproduct_type,
}
}
}

View File

@@ -1,9 +1,11 @@
use std::io::Cursor;
use crate::downloader::query;
use crate::renderable::CreatorDid;
use al_core::image::fits::FitsImage;
use al_core::image::format::ChannelType;
use al_core::image::ImageType;
use al_core::texture::format::PixelType;
use fitsrs::hdu::header::Bitpix;
use fitsrs::{fits::Fits, hdu::data::InMemData};
use super::{Request, RequestType};
use crate::downloader::QueryId;
@@ -14,13 +16,7 @@ pub struct AllskyRequest {
pub id: QueryId,
pub channel: Option<u32>,
pub request: Request<Vec<ImageType>>,
}
impl AllskyRequest {
pub fn missing(&self) -> bool {
self.request.data.borrow().is_none()
}
request: Request<Vec<ImageType>>,
}
impl From<AllskyRequest> for RequestType {
@@ -63,7 +59,7 @@ async fn query_allsky(
let raw_bytes = image_data.data();
Ok(ImageBuffer::from_raw_bytes(raw_bytes.0, w, h))
Ok(ImageBuffer::from_raw_bytes(raw_bytes.0, w as i32, h as i32))
}
impl From<query::Allsky> for AllskyRequest {
@@ -82,12 +78,12 @@ impl From<query::Allsky> for AllskyRequest {
} = query;
//let depth_tile = crate::math::utils::log_2_unchecked(texture_size / tile_size) as u8;
let channel = format.get_pixel_format();
let channel = format.get_channel();
let url_clone = url.clone();
let request = Request::new(async move {
match channel {
PixelType::RGB8U => {
ChannelType::RGB8U => {
let allsky = query_allsky(&url_clone, credentials).await?;
let allsky_tiles =
@@ -95,18 +91,12 @@ impl From<query::Allsky> for AllskyRequest {
.map(|image| {
let ImageBuffer { data, size } = image;
let data = data
.iter()
.into_iter()
.enumerate()
.filter(|&(i, _)| i % 4 != 3)
.map(|(_, v)| *v)
.collect::<Vec<_>>();
let image = ImageBuffer::new(
data.into_boxed_slice(),
size.0,
size.1,
size.2,
);
.map(|(_, v)| v)
.collect();
let image = ImageBuffer::new(data, size.x, size.y);
ImageType::RawRgb8u { image }
})
@@ -114,7 +104,7 @@ impl From<query::Allsky> for AllskyRequest {
Ok(allsky_tiles)
}
PixelType::RGBA8U => {
ChannelType::RGBA8U => {
let allsky = query_allsky(&url_clone, credentials).await?;
let allsky_tiles = handle_allsky_file(allsky, allsky_tile_size, tile_size)?
@@ -142,66 +132,61 @@ impl From<query::Allsky> for AllskyRequest {
// Convert the JS ReadableStream to a Rust stream
let mut reader = body.try_into_async_read().map_err(|_| JsValue::from_str("readable stream locked"))?;*/
let buf = JsFuture::from(resp.array_buffer()?).await?;
let raw_bytes = js_sys::Uint8Array::new(&buf).to_vec();
let array_buffer = JsFuture::from(resp.array_buffer()?).await?;
let bytes_buffer = js_sys::Uint8Array::new(&array_buffer);
let FitsImage {
raw_bytes, bitpix, ..
} = FitsImage::from_raw_bytes(raw_bytes.as_slice())?[0];
match bitpix {
Bitpix::U8 => {
Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
let num_bytes = bytes_buffer.length() as usize;
let mut raw_bytes = vec![0; num_bytes];
bytes_buffer.copy_to(&mut raw_bytes[..]);
let mut reader = Cursor::new(&raw_bytes[..]);
let Fits { hdu } = Fits::from_reader(&mut reader)
.map_err(|_| JsValue::from_str("Parsing fits error of allsky"))?;
let data = hdu.get_data();
match data {
InMemData::U8(data) => {
Ok(handle_allsky_fits(data, tile_size, allsky_tile_size)?
.map(|image| ImageType::RawR8ui { image })
.collect())
}
Bitpix::I16 => {
Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
InMemData::I16(data) => {
Ok(handle_allsky_fits(data, tile_size, allsky_tile_size)?
.map(|image| ImageType::RawR16i { image })
.collect())
}
Bitpix::I32 => {
Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
InMemData::I32(data) => {
Ok(handle_allsky_fits(data, tile_size, allsky_tile_size)?
.map(|image| ImageType::RawR32i { image })
.collect())
}
Bitpix::I64 => {
let data = unsafe {
std::slice::from_raw_parts(
raw_bytes.as_ptr() as *const i64,
raw_bytes.len() / 8,
)
};
InMemData::I64(data) => {
let data = data.iter().map(|v| *v as i32).collect::<Vec<_>>();
let raw_bytes = unsafe {
Ok(handle_allsky_fits(&data, tile_size, allsky_tile_size)?
.map(|image| ImageType::RawR32i { image })
.collect())
}
InMemData::F32(data) => {
let data = unsafe {
std::slice::from_raw_parts(
data.as_ptr() as *const u8,
data.len() * 4,
)
};
Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
.map(|image| ImageType::RawR32i { image })
.collect())
}
Bitpix::F32 => {
Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
Ok(handle_allsky_fits(data, tile_size, allsky_tile_size)?
.map(|image| ImageType::RawRgba8u { image })
.collect())
}
Bitpix::F64 => {
let data = unsafe {
std::slice::from_raw_parts(
raw_bytes.as_ptr() as *const f64,
raw_bytes.len() / 8,
)
};
InMemData::F64(data) => {
let data = data.iter().map(|v| *v as f32).collect::<Vec<_>>();
let raw_bytes = unsafe {
let data = unsafe {
std::slice::from_raw_parts(
data.as_ptr() as *const u8,
data.len() * 4,
)
};
Ok(handle_allsky_fits(raw_bytes, tile_size, allsky_tile_size)?
Ok(handle_allsky_fits(data, tile_size, allsky_tile_size)?
.map(|image| ImageType::RawRgba8u { image })
.collect())
}
@@ -221,9 +206,9 @@ impl From<query::Allsky> for AllskyRequest {
}
}
use al_core::image::format::ImageFormat;
use al_core::image::raw::ImageBufferView;
use al_core::texture::format::TextureFormat;
fn handle_allsky_file<F: TextureFormat>(
fn handle_allsky_file<F: ImageFormat>(
image: ImageBuffer<F>,
allsky_tile_size: i32,
tile_size: i32,
@@ -233,9 +218,9 @@ fn handle_allsky_file<F: TextureFormat>(
let mut src_idx = 0;
let tiles = (0..12).map(move |_| {
let mut base_tile = ImageBuffer::<F>::allocate(
&F::P::BLACK,
allsky_tile_size as u32,
allsky_tile_size as u32,
&<F as ImageFormat>::P::BLACK,
allsky_tile_size,
allsky_tile_size,
);
for idx_tile in 0..64 {
let (x, y) = crate::utils::unmortonize(idx_tile as u64);
@@ -268,8 +253,8 @@ fn handle_allsky_file<F: TextureFormat>(
Ok(tiles)
}
fn handle_allsky_fits<F: TextureFormat>(
image: &[<F::P as Pixel>::Item],
fn handle_allsky_fits<F: ImageFormat>(
image: &[<<F as ImageFormat>::P as Pixel>::Item],
tile_size: i32,
allsky_tile_size: i32,
@@ -283,14 +268,8 @@ fn handle_allsky_fits<F: TextureFormat>(
.rev()
.flatten()
.copied()
.collect::<Vec<_>>()
.into_boxed_slice();
let image = ImageBuffer::<F>::new(
reversed_rows_data,
width_allsky_px as u32,
height_allsky_px as u32,
1,
);
.collect::<Vec<_>>();
let image = ImageBuffer::<F>::new(reversed_rows_data, width_allsky_px, height_allsky_px);
let allsky_tiles_iter =
handle_allsky_file::<F>(image, allsky_tile_size, tile_size)?.map(move |image| {
@@ -307,17 +286,67 @@ fn handle_allsky_fits<F: TextureFormat>(
.cloned()
.collect();
ImageBuffer::<F>::new(
new_image_data,
allsky_tile_size as u32,
allsky_tile_size as u32,
1,
)
ImageBuffer::<F>::new(new_image_data, allsky_tile_size, allsky_tile_size)
});
Ok(allsky_tiles_iter)
}
use al_core::texture::format::RGBA8U;
use al_core::image::format::RGBA8U;
use crate::time::Time;
use std::cell::RefCell;
use std::rc::Rc;
pub struct Allsky {
pub image: Rc<RefCell<Option<Vec<ImageType>>>>,
pub time_req: Time,
//pub depth_tile: u8,
pub hips_cdid: CreatorDid,
url: Url,
pub channel: Option<u32>,
}
use crate::Abort;
impl Allsky {
pub fn missing(&self) -> bool {
self.image.borrow().is_none()
}
pub fn get_hips_cdid(&self) -> &CreatorDid {
&self.hips_cdid
}
pub fn get_url(&self) -> &Url {
&self.url
}
}
impl<'a> From<&'a AllskyRequest> for Option<Allsky> {
fn from(request: &'a AllskyRequest) -> Self {
let AllskyRequest {
request,
hips_cdid,
//depth_tile,
url,
channel,
..
} = request;
if request.is_resolved() {
let Request::<Vec<ImageType>> {
time_request, data, ..
} = request;
Some(Allsky {
time_req: *time_request,
// This is a clone on a Arc, it is supposed to be fast
image: data.clone(),
hips_cdid: hips_cdid.clone(),
url: url.clone(),
//depth_tile: *depth_tile,
channel: *channel,
})
} else {
None
}
}
}

View File

@@ -0,0 +1,161 @@
use al_core::image::format::ChannelType;
use std::io::Cursor;
use crate::downloader::query;
use crate::renderable::CreatorDid;
use fitsrs::fits::Fits;
#[derive(Debug, Clone, Copy)]
pub struct Metadata {
pub blank: f32,
pub scale: f32,
pub offset: f32,
}
impl Default for Metadata {
fn default() -> Self {
Metadata {
blank: -1.0,
scale: 1.0,
offset: 0.0,
}
}
}
use super::{Request, RequestType};
use crate::downloader::QueryId;
pub struct PixelMetadataRequest {
pub id: QueryId,
pub url: Url,
pub hips_cdid: CreatorDid,
request: Request<Metadata>,
}
impl From<PixelMetadataRequest> for RequestType {
fn from(request: PixelMetadataRequest) -> Self {
RequestType::PixelMetadata(request)
}
}
use super::Url;
use wasm_bindgen::JsCast;
use wasm_bindgen::JsValue;
use wasm_bindgen_futures::JsFuture;
use web_sys::{RequestInit, RequestMode, Response};
impl From<query::PixelMetadata> for PixelMetadataRequest {
// Create a tile request associated to a HiPS
fn from(query: query::PixelMetadata) -> Self {
let query::PixelMetadata {
format,
url,
hips_cdid,
id,
} = query;
let url_clone = url.clone();
let channel = format.get_channel();
let window = web_sys::window().unwrap_abort();
let request = match channel {
ChannelType::R32F | ChannelType::R32I | ChannelType::R16I | ChannelType::R8UI => {
Request::new(async move {
let mut opts = RequestInit::new();
opts.method("GET");
opts.mode(RequestMode::Cors);
let request =
web_sys::Request::new_with_str_and_init(&url_clone, &opts).unwrap_abort();
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
// `resp_value` is a `Response` object.
debug_assert!(resp_value.is_instance_of::<Response>());
let resp: Response = resp_value.dyn_into()?;
// See https://github.com/MattiasBuelens/wasm-streams/blob/f6dacf58a8826dc67923ab4a3bae87635690ca64/examples/fetch_as_stream.rs#L25-L33
/*let raw_body = resp.body().ok_or(JsValue::from_str("Cannot extract readable stream"))?;
let body = ReadableStream::from_raw(raw_body.dyn_into()?);
// Convert the JS ReadableStream to a Rust stream
let mut reader = body.try_into_async_read().map_err(|_| JsValue::from_str("readable stream locked"))?;
let image = Fits::new(reader).await?;*/
let array_buffer = JsFuture::from(resp.array_buffer()?).await?;
let bytes_buffer = js_sys::Uint8Array::new(&array_buffer);
let num_bytes = bytes_buffer.length() as usize;
let mut raw_bytes = vec![0; num_bytes];
bytes_buffer.copy_to(&mut raw_bytes[..]);
let mut reader = Cursor::new(&raw_bytes[..]);
let Fits { hdu } = Fits::from_reader(&mut reader)
.map_err(|_| JsValue::from_str("Parsing fits error"))?;
let header = hdu.get_header();
let scale =
if let Some(fitsrs::card::Value::Float(bscale)) = header.get(b"BSCALE ") {
*bscale as f32
} else {
1.0
};
let offset =
if let Some(fitsrs::card::Value::Float(bzero)) = header.get(b"BZERO ") {
*bzero as f32
} else {
0.0
};
let blank =
if let Some(fitsrs::card::Value::Float(blank)) = header.get(b"BLANK ") {
*blank as f32
} else {
f32::NAN
};
Ok(Metadata {
blank,
scale,
offset,
})
})
}
_ => Request::new(async move { Ok(Metadata::default()) }),
};
Self {
id,
url,
hips_cdid,
request,
}
}
}
use std::cell::RefCell;
use std::rc::Rc;
#[derive(Debug)]
pub struct PixelMetadata {
pub value: Rc<RefCell<Option<Metadata>>>,
pub hips_cdid: CreatorDid,
pub url: String,
}
use crate::Abort;
impl<'a> From<&'a PixelMetadataRequest> for Option<PixelMetadata> {
fn from(request: &'a PixelMetadataRequest) -> Self {
let PixelMetadataRequest {
request,
hips_cdid,
url,
..
} = request;
if request.is_resolved() {
let Request::<Metadata> { data, .. } = request;
// It will always be resolved and found as we will request a well know tile (Norder0/Tile0)
Some(PixelMetadata {
hips_cdid: hips_cdid.clone(),
url: url.to_string(),
value: data.clone(),
})
} else {
None
}
}
}

View File

@@ -3,15 +3,15 @@ use crate::renderable::CreatorDid;
use super::{Request, RequestType};
use crate::healpix::moc::Moc;
use crate::healpix::moc::{FreqSpaceMoc, SpaceMoc};
use al_api::hips::DataproductType;
use crate::healpix::coverage::Smoc;
use moclib::deser::fits::MocType;
use moclib::qty::Hpx;
pub struct MOCRequest {
//pub id: QueryId,
pub hips_cdid: CreatorDid,
pub params: MOCOptions,
pub request: Request<Moc>,
request: Request<HEALPixCoverage>,
}
impl From<MOCRequest> for RequestType {
@@ -19,14 +19,35 @@ impl From<MOCRequest> for RequestType {
RequestType::Moc(request)
}
}
use super::Url;
use moclib::deser::fits;
use wasm_bindgen::JsCast;
use wasm_bindgen_futures::JsFuture;
use web_sys::{RequestInit, Response};
use moclib::moc::range::op::convert::convert_to_u64;
/// Convenient type for Space-MOCs
pub fn from_fits_hpx<T: Idx>(moc: MocType<T, Hpx<T>, Cursor<&[u8]>>) -> Smoc {
match moc {
MocType::Ranges(moc) => convert_to_u64::<T, Hpx<T>, _, Hpx<u64>>(moc).into_range_moc(),
MocType::Cells(moc) => {
convert_to_u64::<T, Hpx<T>, _, Hpx<u64>>(moc.into_cell_moc_iter().ranges())
.into_range_moc()
}
}
}
use crate::healpix::coverage::HEALPixCoverage;
use crate::Abort;
use al_api::moc::MOCOptions;
use moclib::deser::fits::MocIdxType;
use moclib::deser::fits::MocQtyType;
use moclib::idx::Idx;
use moclib::moc::{CellMOCIntoIterator, CellMOCIterator, RangeMOCIterator};
use std::io::Cursor;
use wasm_bindgen::JsValue;
impl From<query::Moc> for MOCRequest {
// Create a tile request associated to a HiPS
fn from(query: query::Moc) -> Self {
@@ -36,7 +57,6 @@ impl From<query::Moc> for MOCRequest {
hips_cdid,
credentials,
mode,
dataproduct_type,
} = query;
let url_clone = url.clone();
@@ -55,20 +75,22 @@ impl From<query::Moc> for MOCRequest {
let resp: Response = resp_value.dyn_into()?;
let array_buffer = JsFuture::from(resp.array_buffer()?).await?;
let buf = js_sys::Uint8Array::new(&array_buffer);
let bytes = buf.to_vec();
let bytes_buf = js_sys::Uint8Array::new(&array_buffer);
let num_bytes = bytes_buf.length() as usize;
let mut bytes = vec![0; num_bytes];
bytes_buf.copy_to(&mut bytes[..]);
// Coosys is permissive because we load a moc
Ok(match dataproduct_type {
DataproductType::SpectralCube => {
Moc::FreqSpace(FreqSpaceMoc::from_fits_raw_bytes(&bytes)?)
}
DataproductType::Cube => {
let moc = SpaceMoc::from_fits_raw_bytes(&bytes)?;
Moc::FreqSpace(FreqSpaceMoc::from_space_moc(moc))
}
_ => Moc::Space(SpaceMoc::from_fits_raw_bytes(&bytes)?),
})
let smoc = match fits::from_fits_ivoa_custom(Cursor::new(&bytes[..]), true)
.map_err(|e| JsValue::from_str(&e.to_string()))?
{
MocIdxType::U16(MocQtyType::<u16, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
MocIdxType::U32(MocQtyType::<u32, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
MocIdxType::U64(MocQtyType::<u64, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
_ => Err(JsValue::from_str("MOC not supported. Must be a HPX MOC")),
}?;
Ok(HEALPixCoverage(smoc))
});
Self {
@@ -80,3 +102,39 @@ impl From<query::Moc> for MOCRequest {
}
}
}
use std::cell::RefCell;
use std::rc::Rc;
pub struct Moc {
pub moc: Rc<RefCell<Option<HEALPixCoverage>>>,
pub params: MOCOptions,
pub hips_cdid: Url,
}
impl Moc {
pub fn get_hips_cdid(&self) -> &Url {
&self.hips_cdid
}
}
impl<'a> From<&'a MOCRequest> for Option<Moc> {
fn from(request: &'a MOCRequest) -> Self {
let MOCRequest {
request,
hips_cdid,
params,
..
} = request;
if request.is_resolved() {
let Request::<HEALPixCoverage> { data, .. } = request;
Some(Moc {
// This is a clone on a Arc, it is supposed to be fast
moc: data.clone(),
hips_cdid: hips_cdid.clone(),
params: params.clone(),
})
} else {
None
}
}
}

View File

@@ -1,6 +1,7 @@
// A request image should not be used outside this module
// but contained inside a more specific type of query (e.g. for a tile or allsky)
pub mod allsky;
pub mod blank;
pub mod moc;
pub mod tile;
@@ -11,8 +12,8 @@ use std::cell::{Cell, RefCell};
use std::rc::Rc;
pub type Url = String;
pub struct Request<R> {
pub data: Rc<RefCell<Option<R>>>,
pub time_request: Time,
data: Rc<RefCell<Option<R>>>,
time_request: Time,
// Flag telling if the tile has been copied so that
// the HtmlImageElement can be reused to download another tile
//ready: bool,
@@ -75,19 +76,17 @@ where
pub fn resolve_status(&self) -> ResolvedStatus {
self.resolved.get()
}
pub fn get_data(&self) -> Rc<RefCell<Option<R>>> {
self.data.clone()
}
}
use allsky::AllskyRequest;
use blank::PixelMetadataRequest;
use moc::MOCRequest;
use tile::TileRequest;
pub enum RequestType {
Tile(TileRequest),
Allsky(AllskyRequest),
Moc(MOCRequest),
PixelMetadata(PixelMetadataRequest),
Moc(MOCRequest), //..
}
use crate::downloader::QueryId;
@@ -96,33 +95,38 @@ impl RequestType {
match self {
RequestType::Tile(request) => &request.id,
RequestType::Allsky(request) => &request.id,
RequestType::PixelMetadata(request) => &request.id,
RequestType::Moc(request) => &request.hips_cdid,
}
}
pub fn is_resolved(&self) -> bool {
match self {
RequestType::Tile(request) => request.request.is_resolved(),
RequestType::Allsky(request) => request.request.is_resolved(),
RequestType::Moc(request) => request.request.is_resolved(),
}
}
}
/*
impl From<RequestType> for Option<Resource> {
fn from(request: RequestType) -> Self {
impl<'a> From<&'a RequestType> for Option<Resource> {
fn from(request: &'a RequestType) -> Self {
match request {
RequestType::Tile(request) => Option::<Tile>::from(request).map(Resource::Tile),
RequestType::Allsky(request) => Option::<Allsky>::from(request).map(Resource::Allsky),
RequestType::Moc(request) => Option::<FetchedMoc>::from(request).map(Resource::Moc),
RequestType::PixelMetadata(request) => {
Option::<PixelMetadata>::from(request).map(Resource::PixelMetadata)
}
RequestType::Moc(request) => Option::<Moc>::from(request).map(Resource::Moc),
}
}
}*/
}
use crate::Abort;
use web_sys::RequestCredentials;
use allsky::Allsky;
use blank::PixelMetadata;
use moc::Moc;
use tile::Tile;
pub enum Resource {
Tile(Tile),
Allsky(Allsky),
PixelMetadata(PixelMetadata),
Moc(Moc),
}
use web_sys::RequestCredentials;
async fn query_html_image(
url: &str,
credentials: RequestCredentials,
@@ -151,39 +155,3 @@ async fn query_html_image(
Ok(image)
}
use wasm_bindgen::JsCast;
use web_sys::RequestInit;
use web_sys::RequestMode;
use web_sys::Response;
async fn query_bitmap_from_blob(
url: &str,
mode: RequestMode,
credentials: RequestCredentials,
) -> Result<web_sys::ImageBitmap, JsValue> {
let window = web_sys::window().unwrap_abort();
let mut opts = RequestInit::new();
opts.method("GET");
opts.mode(mode);
opts.credentials(credentials);
let request = web_sys::Request::new_with_str_and_init(url, &opts).unwrap_abort();
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
// `resp_value` is a `Response` object.
debug_assert!(resp_value.is_instance_of::<Response>());
let resp: Response = resp_value.dyn_into()?;
if resp.ok() {
let blob = JsFuture::from(resp.blob()?)
.await?
.dyn_into::<web_sys::Blob>()?;
let image_bitmap = JsFuture::from(window.create_image_bitmap_with_blob(&blob)?).await?;
Ok(image_bitmap.into())
} else {
Err(JsValue::from_str(
"Response status code not between 200-299.",
))
}
}

View File

@@ -1,24 +1,23 @@
use crate::healpix::cell::HEALPixCell;
use crate::renderable::CreatorDid;
use al_core::image::format::ImageFormatType;
use al_core::texture::format::PixelType;
use al_core::image::format::{ChannelType, ImageFormatType, RGB8U, RGBA8U};
use crate::downloader::query;
use al_core::image::ImageType;
use super::super::query::CellDesc;
use super::Url;
use super::{Request, RequestType};
use crate::downloader::request::query_html_image;
use crate::downloader::QueryId;
pub struct TileRequest {
pub request: Request<ImageType>,
request: Request<ImageType>,
pub id: QueryId,
pub cell: CellDesc,
pub hips_cdid: CreatorDid,
pub url: Url,
pub format: ImageFormatType,
cell: HEALPixCell,
hips_cdid: CreatorDid,
url: Url,
format: ImageFormatType,
channel: Option<u32>,
}
impl From<TileRequest> for RequestType {
@@ -27,14 +26,11 @@ impl From<TileRequest> for RequestType {
}
}
use crate::downloader::request::query_bitmap_from_blob;
use al_core::image::bitmap::Bitmap;
use al_core::image::html::HTMLImage;
use wasm_bindgen::JsCast;
use wasm_bindgen::JsValue;
use wasm_bindgen_futures::JsFuture;
use web_sys::{RequestInit, Response};
impl From<query::Tile> for TileRequest {
// Create a tile request associated to a HiPS
fn from(query: query::Tile) -> Self {
@@ -46,93 +42,124 @@ impl From<query::Tile> for TileRequest {
credentials,
mode,
id,
create_bitmap_support,
channel: slice,
size,
} = query;
let url_clone = url.clone();
let pixel_format = format.get_pixel_format();
let channel = format.get_channel();
let size = match cell {
CellDesc::HiPS2D { tile_size, .. } | CellDesc::HiPSCube { tile_size, .. } => {
(tile_size, tile_size, 1)
}
CellDesc::HiPS3D {
tile_size,
tile_depth,
..
} => (tile_size, tile_size, tile_depth),
};
let window = web_sys::window().unwrap_abort();
let request = match channel {
ChannelType::RGB8U => Request::new(async move {
/*let mut opts = RequestInit::new();
opts.method("GET");
opts.mode(RequestMode::Cors);
let request = match pixel_format {
PixelType::RGB8U => Request::new(async move {
if create_bitmap_support {
// optimized download of tile for GPU (using Blob + Bitmap) without creating any DOM structure
let image_bitmap =
query_bitmap_from_blob(&url_clone, mode, credentials).await?;
Ok(ImageType::ImageRgb8u {
image: Bitmap::new(image_bitmap),
})
} else {
// HTMLImageElement
let image = query_html_image(&url_clone, credentials).await?;
// The image has been resolved
Ok(ImageType::HTMLImageRgb8u {
image: HTMLImage::new(image),
})
}
}),
PixelType::RGBA8U => Request::new(async move {
if create_bitmap_support {
// optimized download of tile for GPU (using Blob + Bitmap) without creating any DOM structure
let image_bitmap =
query_bitmap_from_blob(&url_clone, mode, credentials).await?;
Ok(ImageType::ImageRgba8u {
image: Bitmap::new(image_bitmap),
})
} else {
// HTMLImageElement
let image = query_html_image(&url_clone, credentials).await?;
// The image has been resolved
Ok(ImageType::HTMLImageRgba8u {
image: HTMLImage::new(image),
})
}
}),
PixelType::R32F | PixelType::R32I | PixelType::R16I | PixelType::R8U => {
Request::new(async move {
let window = web_sys::window().unwrap_abort();
let request = web_sys::Request::new_with_str_and_init(&url_clone, &opts).unwrap_abort();
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
// `resp_value` is a `Response` object.
debug_assert!(resp_value.is_instance_of::<Response>());
let resp: Response = resp_value.dyn_into()?;*/
let mut opts = RequestInit::new();
opts.method("GET");
opts.mode(mode);
opts.credentials(credentials);
/*/// Bitmap version
let blob = JsFuture::from(resp.blob()?).await?.into();
let image = JsFuture::from(window.create_image_bitmap_with_blob(&blob)?)
.await?
.into();
let request =
web_sys::Request::new_with_str_and_init(&url_clone, &opts).unwrap_abort();
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
// `resp_value` is a `Response` object.
debug_assert!(resp_value.is_instance_of::<Response>());
let resp: Response = resp_value.dyn_into()?;
// See https://github.com/MattiasBuelens/wasm-streams/blob/f6dacf58a8826dc67923ab4a3bae87635690ca64/examples/fetch_as_stream.rs#L25-L33
/*let raw_body = resp.body().ok_or(JsValue::from_str("Cannot extract readable stream"))?;
let body = ReadableStream::from_raw(raw_body.dyn_into()?);
let image = Bitmap::new(image);
Ok(ImageType::JpgImageRgb8u { image })*/
/*
/// Raw image decoding
// Convert the JS ReadableStream to a Rust stream
let mut reader = body.try_into_async_read().map_err(|_| JsValue::from_str("readable stream locked"))?;
let image = Fits::new(reader).await?;
*/
if resp.ok() {
let array_buffer = JsFuture::from(resp.array_buffer()?).await?;
let raw_bytes = js_sys::Uint8Array::new(&array_buffer);
let buf = JsFuture::from(resp.array_buffer()?).await?;
let raw_bytes = js_sys::Uint8Array::new(&buf).to_vec();
let image = ImageBuffer::<RGB8U>::from_raw_bytes(&raw_bytes[..], 512, 512)?;
Ok(ImageType::FitsRawBytes { raw_bytes, size })
} else {
Err(JsValue::from_str(
"Response status code not between 200-299.",
))
}
Ok(ImageType::RawRgb8u { image })
*/
// HTMLImageElement
let image = query_html_image(&url_clone, credentials).await?;
// The image has been resolved
Ok(ImageType::HTMLImageRgb8u {
image: HTMLImage::<RGB8U>::new(image),
})
}
}),
ChannelType::RGBA8U => Request::new(async move {
/*let mut opts = RequestInit::new();
opts.method("GET");
opts.mode(RequestMode::Cors);
let request = web_sys::Request::new_with_str_and_init(&url_clone, &opts).unwrap_abort();
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
// `resp_value` is a `Response` object.
debug_assert!(resp_value.is_instance_of::<Response>());
let resp: Response = resp_value.dyn_into()?;*/
/*/// Bitmap version
let blob = JsFuture::from(resp.blob()?).await?.into();
let image = JsFuture::from(window.create_image_bitmap_with_blob(&blob)?)
.await?
.into();
let image = Bitmap::new(image);
Ok(ImageType::PngImageRgba8u { image })*/
/*
/// Raw image decoding
let buf = JsFuture::from(resp.array_buffer()?).await?;
let raw_bytes = js_sys::Uint8Array::new(&buf).to_vec();
let image = ImageBuffer::<RGBA8U>::from_raw_bytes(&raw_bytes[..], 512, 512)?;
Ok(ImageType::RawRgba8u { image })
*/
// HTMLImageElement
let image = query_html_image(&url_clone, credentials).await?;
// The image has been resolved
Ok(ImageType::HTMLImageRgba8u {
image: HTMLImage::<RGBA8U>::new(image),
})
}),
ChannelType::R32F
| ChannelType::R64F
| ChannelType::R32I
| ChannelType::R16I
| ChannelType::R8UI => Request::new(async move {
let mut opts = RequestInit::new();
opts.method("GET");
opts.mode(mode);
opts.credentials(credentials);
let request =
web_sys::Request::new_with_str_and_init(&url_clone, &opts).unwrap_abort();
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
// `resp_value` is a `Response` object.
debug_assert!(resp_value.is_instance_of::<Response>());
let resp: Response = resp_value.dyn_into()?;
// See https://github.com/MattiasBuelens/wasm-streams/blob/f6dacf58a8826dc67923ab4a3bae87635690ca64/examples/fetch_as_stream.rs#L25-L33
/*let raw_body = resp.body().ok_or(JsValue::from_str("Cannot extract readable stream"))?;
let body = ReadableStream::from_raw(raw_body.dyn_into()?);
// Convert the JS ReadableStream to a Rust stream
let mut reader = body.try_into_async_read().map_err(|_| JsValue::from_str("readable stream locked"))?;
let image = Fits::new(reader).await?;
*/
if resp.ok() {
let array_buffer = JsFuture::from(resp.array_buffer()?).await?;
let raw_bytes = js_sys::Uint8Array::new(&array_buffer);
Ok(ImageType::FitsImage {
raw_bytes,
size: (size, size),
})
} else {
Err(JsValue::from_str(
"Response status code not between 200-299.",
))
}
}),
_ => todo!(),
};
Self {
@@ -142,8 +169,74 @@ impl From<query::Tile> for TileRequest {
hips_cdid,
url,
request,
channel: slice,
}
}
}
use crate::time::Time;
use std::cell::RefCell;
use std::rc::Rc;
pub struct Tile {
pub image: Rc<RefCell<Option<ImageType>>>,
pub time_req: Time,
pub cell: HEALPixCell,
pub format: ImageFormatType,
pub channel: Option<u32>,
hips_cdid: CreatorDid,
url: Url,
}
use crate::Abort;
impl Tile {
#[inline(always)]
pub fn missing(&self) -> bool {
self.image.borrow().is_none()
}
#[inline(always)]
pub fn get_hips_cdid(&self) -> &CreatorDid {
&self.hips_cdid
}
#[inline(always)]
pub fn get_url(&self) -> &Url {
&self.url
}
#[inline(always)]
pub fn cell(&self) -> &HEALPixCell {
&self.cell
}
}
impl<'a> From<&'a TileRequest> for Option<Tile> {
fn from(request: &'a TileRequest) -> Self {
let TileRequest {
cell,
request,
hips_cdid,
url,
format,
channel,
..
} = request;
if request.is_resolved() {
let Request::<ImageType> {
time_request, data, ..
} = request;
Some(Tile {
cell: *cell,
time_req: *time_request,
// This is a clone on a Arc, it is supposed to be fast
image: data.clone(),
hips_cdid: hips_cdid.clone(),
url: url.clone(),
format: *format,
channel: *channel,
})
} else {
None
}
}
}

View File

@@ -1,18 +0,0 @@
use wasm_bindgen::prelude::*;
use web_sys::{window, CustomEvent, CustomEventInit};
pub(crate) fn send_custom_event(name: &str, value: JsValue) {
// Create event details (optional)
let mut event_init = CustomEventInit::new();
event_init.detail(&value);
// Create the event
let event = CustomEvent::new_with_event_init_dict(name, &event_init)
.expect("Failed to create custom event");
// Dispatch the event on the window or any target element
window()
.expect("no global `window` exists")
.dispatch_event(&event)
.expect("failed to dispatch event");
}

View File

@@ -15,7 +15,6 @@ use healpix::compass_point::MainWind;
use healpix::compass_point::Ordinal;
use healpix::compass_point::OrdinalMap;
use crate::math::lonlat::LonLatT;
use crate::utils;
impl HEALPixCell {
@@ -96,13 +95,6 @@ impl HEALPixCell {
self.depth() == 0
}
#[inline(always)]
pub fn hash_with_dxdy(depth: u8, lon: f64, lat: f64) -> (Self, f64, f64) {
let (hash, dx, dy) = healpix::nested::hash_with_dxdy(depth, lon, lat);
(HEALPixCell(depth, hash), dx, dy)
}
// Find the smallest HEALPix cell containing self and another cells
// Returns None if the 2 HEALPix cell are not located in the same base HEALPix cell
#[inline]
@@ -491,83 +483,6 @@ impl Ord for HEALPixCell {
}
}
/// A simple object describing a cubic tile of a HiPS3D
#[derive(Eq, Hash, PartialEq, Clone, Debug)]
pub struct HEALPixFreqCell {
pub hpx: HEALPixCell,
pub f_hash: u64,
pub f_depth: u8,
}
use crate::math::spectra::Freq;
use crate::math::spectra::SpectralUnit;
impl HEALPixFreqCell {
pub fn from_lonlat(lonlat: LonLatT<f64>, freq: Freq, s_depth: u8, f_depth: u8) -> Self {
let hpx = HEALPixCell::new(
s_depth,
lonlat.lon().to_radians(),
lonlat.lat().to_radians(),
);
let f_hash = freq.hash(f_depth);
Self {
hpx,
f_hash,
f_depth,
}
}
pub fn new(hpx: HEALPixCell, f_hash: u64, f_depth: u8) -> Self {
Self {
hpx,
f_hash,
f_depth,
}
}
pub fn hpx_parent(&self) -> Self {
Self {
hpx: self.hpx.parent(),
f_hash: self.f_hash,
f_depth: self.f_depth,
}
}
pub fn parent(&self) -> Self {
Self {
hpx: self.hpx.parent(),
f_hash: self.f_hash >> 1,
f_depth: self.f_depth - 1,
}
}
pub fn is_hpx_root(&self) -> bool {
self.hpx.is_root()
}
pub fn freq_range(&self) -> Range<Freq> {
let f0 = Freq::from_hash_with_order(self.f_hash, self.f_depth);
let f1 = Freq::from_hash_with_order(
(self.f_hash + 1).min(Freq::num_max_cells(self.f_depth) as u64),
self.f_depth,
);
f0..f1
}
pub fn pixel_frequencies(&self, num_pixels: usize) -> impl Iterator<Item = f32> {
let delta_depth = num_pixels.trailing_zeros();
let pixel_depth = self.f_depth + delta_depth as u8;
let h0 = self.f_hash << delta_depth;
let h1 = (self.f_hash + 1) << delta_depth;
(h0..h1).map(move |hash| Freq::from_hash_with_order(hash, pixel_depth).0 as f32)
}
}
// Utils
#[inline(always)]
pub fn nside2depth(nside: u32) -> u8 {

View File

@@ -1,8 +1,8 @@
use crate::math::lonlat::LonLat;
use crate::math::lonlat::LonLatT;
use crate::math::PI;
use crate::math::{self, lonlat::LonLat};
use moclib::moc::RangeMOCIntoIterator;
use cgmath::Vector3;
use moclib::{
moc::range::{CellSelection, RangeMOC},
qty::Hpx,
@@ -12,63 +12,9 @@ pub type Smoc = RangeMOC<u64, Hpx<u64>>;
use crate::healpix::cell::HEALPixCell;
#[derive(Clone, Debug)]
pub struct SpaceMoc(pub Smoc);
use wasm_bindgen::JsValue;
use moclib::deser::fits;
use moclib::deser::fits::MocIdxType;
use moclib::deser::fits::MocQtyType;
use moclib::idx::Idx;
use moclib::moc::range::op::convert::convert_to_u64;
use moclib::moc::{CellMOCIntoIterator, CellMOCIterator, RangeMOCIterator};
/// Convenient type for Space-MOCs
pub fn from_fits_hpx<T: Idx>(moc: MocType<T, Hpx<T>, Cursor<&[u8]>>) -> Smoc {
match moc {
MocType::Ranges(moc) => convert_to_u64::<T, Hpx<T>, _, Hpx<u64>>(moc).into_range_moc(),
MocType::Cells(moc) => {
convert_to_u64::<T, Hpx<T>, _, Hpx<u64>>(moc.into_cell_moc_iter().ranges())
.into_range_moc()
}
}
}
use moclib::deser::fits::MocType;
use std::io::Cursor;
impl SpaceMoc {
pub fn from_fits_raw_bytes(bytes: &[u8]) -> Result<Self, JsValue> {
let smoc = match fits::from_fits_ivoa_custom(Cursor::new(bytes), true)
.map_err(|e| JsValue::from_str(&e.to_string()))?
{
MocIdxType::U16(MocQtyType::<u16, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
MocIdxType::U32(MocQtyType::<u32, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
MocIdxType::U64(MocQtyType::<u64, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
_ => Err(JsValue::from_str("MOC not supported. Must be a HPX MOC")),
}?;
Ok(Self(smoc))
}
pub fn from_json(s: &str) -> Result<Self, JsValue> {
let moc = moclib::deser::json::from_json_aladin::<u64, Hpx<u64>>(s)
.map_err(|e| JsValue::from(js_sys::Error::new(&e.to_string())))?
.into_cell_moc_iter()
.ranges()
.into_range_moc();
Ok(Self(moc))
}
pub fn serialize_to_json(&self) -> Result<String, JsValue> {
let mut buf: Vec<u8> = Default::default();
(&self.0)
.into_range_moc_iter()
.cells()
.to_json_aladin(None, &mut buf)
.map(|()| unsafe { String::from_utf8_unchecked(buf) })
.map_err(|err| JsValue::from_str(&format!("{err:?}")))
}
pub struct HEALPixCoverage(pub Smoc);
impl HEALPixCoverage {
pub fn from_3d_coos<T: LonLat<f64>>(
// The depth of the smallest HEALPix cells contained in it
depth: u8,
@@ -92,7 +38,7 @@ impl SpaceMoc {
depth,
CellSelection::All,
);
SpaceMoc(moc)
HEALPixCoverage(moc)
}
pub fn from_fixed_hpx_cells(
@@ -101,7 +47,7 @@ impl SpaceMoc {
cap: Option<usize>,
) -> Self {
let moc = RangeMOC::from_fixed_depth_cells(depth, hpx_idx, cap);
SpaceMoc(moc)
HEALPixCoverage(moc)
}
pub fn from_hpx_cells<'a>(
@@ -112,14 +58,14 @@ impl SpaceMoc {
let cells_it = hpx_cell_it.map(|HEALPixCell(depth, idx)| (*depth, *idx));
let moc = RangeMOC::from_cells(depth, cells_it, cap);
SpaceMoc(moc)
HEALPixCoverage(moc)
}
pub fn from_cone(lonlat: &LonLatT<f64>, rad: f64, depth: u8) -> Self {
if rad >= PI {
Self::allsky(depth)
} else {
SpaceMoc(RangeMOC::from_cone(
HEALPixCoverage(RangeMOC::from_cone(
lonlat.lon().to_radians(),
lonlat.lat().to_radians(),
rad,
@@ -132,7 +78,12 @@ impl SpaceMoc {
pub fn allsky(depth_max: u8) -> Self {
let moc = RangeMOC::new_full_domain(depth_max);
SpaceMoc(moc)
HEALPixCoverage(moc)
}
pub fn contains_coo(&self, coo: &Vector3<f64>) -> bool {
let (lon, lat) = math::lonlat::xyz_to_radec(coo);
self.0.is_in(lon.to_radians(), lat.to_radians())
}
pub fn contains_lonlat(&self, lonlat: &LonLatT<f64>) -> bool {
@@ -147,9 +98,9 @@ impl SpaceMoc {
self.0.moc_ranges().intersects_range(&z29_rng)
}
/*pub fn is_intersecting(&self, other: &Self) -> bool {
pub fn is_intersecting(&self, other: &Self) -> bool {
!self.0.intersection(&other.0).is_empty()
}*/
}
pub fn depth(&self) -> u8 {
self.0.depth_max()
@@ -160,16 +111,16 @@ impl SpaceMoc {
}
pub fn not(&self) -> Self {
SpaceMoc(self.0.not())
HEALPixCoverage(self.0.not())
}
pub fn empty(depth: u8) -> Self {
SpaceMoc(RangeMOC::new_empty(depth))
HEALPixCoverage(RangeMOC::new_empty(depth))
}
}
use core::ops::Deref;
impl Deref for SpaceMoc {
impl Deref for HEALPixCoverage {
type Target = Smoc;
fn deref(&'_ self) -> &'_ Self::Target {

View File

@@ -1,131 +0,0 @@
use crate::healpix::cell::HEALPixFreqCell;
use moclib::hpxranges2d::HpxRanges2D;
use moclib::ranges::ranges2d::Ranges2D;
use moclib::qty::{Frequency, MocQty};
#[derive(Debug)]
pub struct FreqSpaceMoc(pub moclib::hpxranges2d::FreqSpaceMoc<u64, u64>);
impl Clone for FreqSpaceMoc {
fn clone(&self) -> Self {
let HpxRanges2D(Moc2DRanges {
ranges2d: Ranges2D { x, y },
..
}) = &**self;
Self(HpxRanges2D(Moc2DRanges::new(x.clone(), y.clone())))
}
}
use wasm_bindgen::JsValue;
use moclib::deser::fits;
use moclib::deser::fits::MocIdxType;
use moclib::deser::fits::MocQtyType;
use moclib::mocranges2d::Moc2DRanges;
use std::io::Cursor;
impl FreqSpaceMoc {
/// Create a FreqSpaceMoc from a
pub fn from_space_moc(moc: SpaceMoc) -> Self {
let moc_2d = Moc2DRanges::new(vec![0..u64::MAX; 1], vec![moc.0.into_moc_ranges().0]);
FreqSpaceMoc(HpxRanges2D(moc_2d))
}
pub fn from_fits_raw_bytes(bytes: &[u8]) -> Result<Self, JsValue> {
let sfmoc = match fits::from_fits_ivoa_custom(Cursor::new(bytes), true)
.map_err(|e| JsValue::from_str(&e.to_string()))?
{
//MocIdxType::U16(MocQtyType::<u16, _>::FreqHpx(moc)) => Ok(from_fits_hpx(moc)),
//MocIdxType::U32(MocQtyType::<u32, _>::FreqHpx(moc)) => Ok(from_fits_hpx(moc)),
MocIdxType::U64(MocQtyType::<u64, _>::FreqHpx(ranges_iter)) => {
/*al_core::log(&format!(
"ranges moc 2D iter from fits {:?}",
));*/
let moc_2d_ranges = Moc2DRanges::from_ranges_it(ranges_iter);
let inner = moclib::hpxranges2d::HpxRanges2D(moc_2d_ranges);
Ok(inner)
}
_ => Err(JsValue::from_str(
"MOC not supported. Must be a FREQ|HPX 2DMOC coded on U64 only",
)),
}?;
Ok(Self(sfmoc))
}
/*pub fn from_fixed_hpx_cells(
depth: u8,
hpx_idx: impl Iterator<Item = u64>,
cap: Option<usize>,
) -> Self {
let moc = RangeMOC::from_fixed_depth_cells(depth, hpx_idx, cap);
SpaceMoc(moc)
}
pub fn from_hpx_cells<'a>(
depth: u8,
hpx_cell_it: impl Iterator<Item = &'a HEALPixCell>,
cap: Option<usize>,
) -> Self {
let cells_it = hpx_cell_it.map(|HEALPixCell(depth, idx)| (*depth, *idx));
let moc = RangeMOC::from_cells(depth, cells_it, cap);
SpaceMoc(moc)
}*/
pub fn f_max_depth(&self) -> u8 {
self.0.compute_min_depth().0
}
pub fn s_max_depth(&self) -> u8 {
self.0.compute_min_depth().1
}
pub fn sky_fraction(&self) -> f64 {
todo!()
}
pub fn intersects_cell(&self, cell: &HEALPixFreqCell) -> bool {
let HEALPixFreqCell {
hpx,
f_hash,
f_depth,
} = *cell;
let f_hash_0 = f_hash << (Frequency::<u64>::MAX_DEPTH - f_depth);
let f_hash_1 = (f_hash + 1) << (Frequency::<u64>::MAX_DEPTH - f_depth);
//let f0 = Frequency::<u64>::hash2freq(5171582628058365952);
//let f1 = Frequency::<u64>::hash2freq(5171590187200806912);
//al_core::log(&format!("F1: {f0}"));
let hpx_ranges_2d = HpxRanges2D::create_from_freq_ranges_positions(
vec![f_hash_0..f_hash_1; 1],
vec![hpx.idx()],
Frequency::<u64>::MAX_DEPTH,
hpx.depth(),
);
!self.0.intersection(&hpx_ranges_2d).is_empty()
}
/*/// provide the list of (hash hpx, hash freq) of the cells contained in the sfmoc
pub fn cells(&self) -> impl Iterator<Item = (u64, u64)> {
todo!()
}*/
}
use core::ops::Deref;
use super::SpaceMoc;
impl Deref for FreqSpaceMoc {
type Target = moclib::hpxranges2d::FreqSpaceMoc<u64, u64>;
fn deref(&'_ self) -> &'_ Self::Target {
&self.0
}
}

View File

@@ -1,10 +0,0 @@
mod freq_space;
mod space;
pub use freq_space::FreqSpaceMoc;
pub use space::SpaceMoc;
pub enum Moc {
FreqSpace(FreqSpaceMoc),
Space(SpaceMoc),
}

View File

@@ -1,4 +1,4 @@
pub mod cell;
pub mod coverage;
pub mod index_vector;
pub mod moc;
pub mod utils;

View File

@@ -39,7 +39,7 @@ pub trait Abort {
impl<T> Abort for Option<T> {
type Item = T;
#[inline(always)]
#[inline]
fn unwrap_abort(self) -> Self::Item {
use std::process;
match self {
@@ -51,7 +51,7 @@ impl<T> Abort for Option<T> {
impl<T, E> Abort for Result<T, E> {
type Item = T;
#[inline(always)]
#[inline]
fn unwrap_abort(self) -> Self::Item {
use std::process;
match self {
@@ -65,7 +65,7 @@ extern crate serde_json;
#[macro_use]
extern crate enum_dispatch;
#[inline(always)]
#[inline]
pub fn unwrap_abort<T>(o: Option<T>) -> T {
use std::process;
match o {
@@ -85,14 +85,13 @@ mod utils;
use math::projection::*;
use moclib::moc::RangeMOCIntoIterator;
//use votable::votable::VOTableWrapper;
use crate::tile_fetcher::HiPSLocalFiles;
use al_api::moc::MOCOptions;
use wasm_bindgen::prelude::*;
use web_sys::HtmlElement;
use fitsrs::{WCSParams, WCS};
use crate::math::angle::ToAngle;
mod app;
@@ -100,10 +99,8 @@ pub mod async_task;
mod camera;
mod shaders;
mod browser_support;
mod coosys;
mod downloader;
mod event;
mod fifo_cache;
mod healpix;
mod inertia;
@@ -113,10 +110,16 @@ mod shader;
mod tile_fetcher;
mod time;
use crate::downloader::request::moc::from_fits_hpx;
use crate::{
camera::CameraViewPort, healpix::moc::SpaceMoc, math::lonlat::LonLatT, shader::ShaderManager,
time::DeltaTime,
camera::CameraViewPort, healpix::coverage::HEALPixCoverage, math::lonlat::LonLatT,
shader::ShaderManager, time::DeltaTime,
};
use moclib::deser::fits;
use moclib::deser::fits::MocIdxType;
use moclib::deser::fits::MocQtyType;
use std::io::Cursor;
use al_api::color::{Color, ColorRGBA};
use al_api::coo_system::CooSystem;
@@ -131,6 +134,10 @@ use cgmath::{Vector2, Vector3};
use crate::healpix::cell::HEALPixCell;
use math::angle::ArcDeg;
use moclib::{
moc::{CellMOCIntoIterator, CellMOCIterator, RangeMOCIterator},
qty::Hpx,
};
#[wasm_bindgen]
pub struct WebClient {
@@ -343,31 +350,33 @@ impl WebClient {
Ok(())
}
#[wasm_bindgen(js_name = addFITSImage)]
pub fn add_fits_image(
#[wasm_bindgen(js_name = addImageFITS)]
pub fn add_image_fits(
&mut self,
bytes: &[u8],
stream: web_sys::ReadableStream,
cfg: JsValue,
layer: String,
) -> Result<js_sys::Promise, JsValue> {
let cfg: ImageMetadata = serde_wasm_bindgen::from_value(cfg)?;
self.app.add_fits_image(bytes, cfg, layer)
self.app.add_image_fits(stream, cfg, layer)
}
#[wasm_bindgen(js_name = addRGBAImage)]
pub fn add_rgba_image(
#[wasm_bindgen(js_name = addImageWithWCS)]
pub fn add_image_with_wcs(
&mut self,
bytes: &[u8],
stream: web_sys::ReadableStream,
wcs: JsValue,
cfg: JsValue,
layer: String,
) -> Result<js_sys::Promise, JsValue> {
use wcs::{WCSParams, WCS};
let cfg: ImageMetadata = serde_wasm_bindgen::from_value(cfg)?;
let wcs_params: WCSParams = serde_wasm_bindgen::from_value(wcs)?;
let wcs = WCS::new(&wcs_params).map_err(|e| JsValue::from_str(&format!("{:?}", e)))?;
let wcs = WCS::new(&wcs_params).map_err(|e| JsValue::from_str(&format!("{e:?}")))?;
self.app.add_rgba_image(layer, bytes, wcs, cfg)
self.app
.add_image_from_blob_and_wcs(layer, stream, wcs, cfg)
}
#[wasm_bindgen(js_name = removeLayer)]
@@ -412,24 +421,9 @@ impl WebClient {
self.app.set_image_hips_color_cfg(layer, meta)
}
#[wasm_bindgen(js_name = setFreq)]
pub fn set_hips_frequency(&mut self, layer: String, frequency: f32) -> Result<(), JsValue> {
self.app.set_hips_frequency(&layer, frequency)
}
#[wasm_bindgen(js_name = getFreq)]
pub fn get_hips_frequency(&mut self, layer: String) -> Result<f32, JsValue> {
self.app.get_hips_frequency(&layer)
}
#[wasm_bindgen(js_name = freq2hash)]
pub fn get_freq_hash(&mut self, layer: String, freq: f64) -> Result<u64, JsValue> {
self.app.get_freq_hash(&layer, freq)
}
#[wasm_bindgen(js_name = hash2freq)]
pub fn get_freq_from_hash(&mut self, layer: String, hash: u64) -> Result<f64, JsValue> {
self.app.get_freq_from_hash(&layer, hash)
#[wasm_bindgen(js_name = setSliceNumber)]
pub fn set_hips_slice_number(&mut self, layer: String, slice: u32) -> Result<(), JsValue> {
self.app.set_hips_slice_number(&layer, slice)
}
#[wasm_bindgen(js_name = setBackgroundColor)]
@@ -988,7 +982,7 @@ impl WebClient {
let grad = colorgrad::CustomGradient::new()
.colors(&rgba_colors?)
.build()
.map_err(|err| JsValue::from_str(&format!("{err:?}")))?;
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?;
let cmap = Colormap::new(&label, grad);
self.app.add_cmap(label, cmap)?;
@@ -1068,8 +1062,13 @@ impl WebClient {
pub fn add_json_moc(&mut self, options: MOCOptions, data: &JsValue) -> Result<(), JsValue> {
let str: String = js_sys::JSON::stringify(data)?.into();
let smoc = SpaceMoc::from_json(&str)?;
self.app.add_moc(smoc, options)?;
let moc = moclib::deser::json::from_json_aladin::<u64, Hpx<u64>>(&str)
.map_err(|e| JsValue::from(js_sys::Error::new(&e.to_string())))?
.into_cell_moc_iter()
.ranges()
.into_range_moc();
self.app.add_moc(HEALPixCoverage(moc), options)?;
Ok(())
}
@@ -1077,8 +1076,18 @@ impl WebClient {
#[wasm_bindgen(js_name = addFITSMOC)]
pub fn add_fits_moc(&mut self, options: MOCOptions, data: &[u8]) -> Result<(), JsValue> {
//let bytes = js_sys::Uint8Array::new(array_buffer).to_vec();
let smoc = SpaceMoc::from_fits_raw_bytes(data)?;
self.app.add_moc(smoc, options)?;
let moc = match fits::from_fits_ivoa_custom(Cursor::new(data), false)
.map_err(|e| JsValue::from_str(&e.to_string()))?
{
MocIdxType::U16(MocQtyType::<u16, _>::Hpx(moc)) => {
Ok(crate::downloader::request::moc::from_fits_hpx(moc))
}
MocIdxType::U32(MocQtyType::<u32, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
MocIdxType::U64(MocQtyType::<u64, _>::Hpx(moc)) => Ok(from_fits_hpx(moc)),
_ => Err(JsValue::from_str("MOC not supported. Must be a HPX MOC")),
}?;
self.app.add_moc(HEALPixCoverage(moc), options)?;
Ok(())
}
@@ -1093,7 +1102,7 @@ impl WebClient {
) -> Result<(), JsValue> {
let tile_d = self.app.get_norder();
let pixel_d = tile_d + 9;
let moc = SpaceMoc::from_cone(
let moc = HEALPixCoverage::from_cone(
&LonLatT::new(
ra_deg.to_radians().to_angle(),
dec_deg.to_radians().to_angle(),
@@ -1127,7 +1136,7 @@ impl WebClient {
let v_in = &Vector3::new(1.0, 0.0, 0.0);
let mut moc = SpaceMoc::from_3d_coos(pixel_d as u8 - 1, vertex_it, v_in);
let mut moc = HEALPixCoverage::from_3d_coos(pixel_d as u8 - 1, vertex_it, v_in);
if moc.sky_fraction() > 0.5 {
moc = moc.not();
}
@@ -1173,9 +1182,15 @@ impl WebClient {
.get_moc(&moc_uuid)
.ok_or_else(|| JsValue::from(js_sys::Error::new("MOC not found")))?;
let json = moc.serialize_to_json()?;
let mut buf: Vec<u8> = Default::default();
let json = (&moc.0)
.into_range_moc_iter()
.cells()
.to_json_aladin(None, &mut buf)
.map(|()| unsafe { String::from_utf8_unchecked(buf) })
.map_err(|err| JsValue::from_str(&format!("{:?}", err)))?;
serde_wasm_bindgen::to_value(&json).map_err(|err| JsValue::from_str(&format!("{err:?}")))
serde_wasm_bindgen::to_value(&json).map_err(|err| JsValue::from_str(&format!("{:?}", err)))
}
#[wasm_bindgen(js_name = getMOCSkyFraction)]

View File

@@ -49,21 +49,12 @@ where
}
use crate::math::angle::ToAngle;
impl From<fitsrs::wcs::LonLat> for LonLatT<f64> {
fn from(lonlat: fitsrs::wcs::LonLat) -> Self {
impl From<wcs::LonLat> for LonLatT<f64> {
fn from(lonlat: wcs::LonLat) -> Self {
Self(lonlat.lon().to_angle(), lonlat.lat().to_angle())
}
}
impl<S: BaseFloat> From<&'_ Vector3<S>> for LonLatT<S> {
fn from(v: &'_ Vector3<S>) -> Self {
let lon = Rad(v.x.atan2(v.z));
let lat = Rad(v.y.atan2((v.x * v.x + v.z * v.z).sqrt()));
LonLatT::new(Angle::new(lon), Angle::new(lat))
}
}
impl<S> LonLat<S> for LonLatT<S>
where
S: BaseFloat,
@@ -107,7 +98,10 @@ where
#[inline]
fn lonlat(&self) -> LonLatT<S> {
self.into()
let lon = Rad(self.x.atan2(self.z));
let lat = Rad(self.y.atan2((self.x * self.x + self.z * self.z).sqrt()));
LonLatT::new(Angle::new(lon), Angle::new(lat))
}
#[inline]

View File

@@ -9,8 +9,6 @@ pub const SQRT_TWO: f64 = std::f64::consts::SQRT_2;
pub const ZERO: f64 = 0.0;
pub mod spectra;
pub mod angle;
pub mod lonlat;
pub mod projection;

View File

@@ -1,104 +0,0 @@
pub trait SpectralUnit: Into<Freq> + Clone + Copy {
fn hash(&self, depth: u8) -> u64 {
let f: Freq = (*self).into();
let f_hash_max_order = Frequency::<u64>::freq2hash(f.0);
f_hash_max_order >> (Frequency::<u64>::MAX_DEPTH - depth)
}
}
use moclib::qty::{Frequency, MocQty};
pub const FREQ_MAX: Freq = Freq(5.846_006_549_323_611e48);
pub const FREQ_MIN: Freq = Freq(5.048_709_793_414_476e-29);
/// Frequency in Hz unit
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
pub struct Freq(pub f64);
impl Freq {
pub fn from_hash(hash: u64) -> Self {
let f = Frequency::hash2freq(hash);
Freq(f)
}
pub fn from_hash_with_order(hash: u64, order: u8) -> Self {
let hash_max_order = hash << (Frequency::<u64>::MAX_DEPTH - order);
let f = Frequency::hash2freq(hash_max_order);
Freq(f)
}
pub fn max(&self, other: Self) -> Self {
Freq(self.0.max(other.0))
}
pub fn min(&self, other: Self) -> Self {
Freq(self.0.min(other.0))
}
pub fn num_max_cells(order: u8) -> usize {
(Frequency::<u64>::n_cells_max() >> (Frequency::<u64>::MAX_DEPTH - order)) as usize
}
}
use std::ops::Sub;
impl Sub for Freq {
type Output = Self;
fn sub(self, other: Self) -> Self::Output {
Self(self.0 - other.0)
}
}
use std::ops::Add;
impl Add for Freq {
type Output = Self;
fn add(self, other: Self) -> Self::Output {
Self(self.0 + other.0)
}
}
/// Wavelength in meter unit
#[derive(Clone, Copy)]
pub struct Wavelength(pub f64);
/// Velocity in meter/sec unit
#[derive(Clone, Copy)]
pub struct Velocity {
/// A rest frequency to compute the velocity from
/// given by the obs_restfreq HiPS property
rest_freq: Freq,
/// The velocity in m/s
velocity: f64,
}
const SPEED_OF_LIGHT: f64 = 299792458.0;
impl From<Velocity> for Freq {
fn from(v: Velocity) -> Self {
let Velocity {
rest_freq,
velocity,
} = v;
// v = c * (of - f) / of
// v * of = c * (of - f)
// c * f = c * of - v * of = of * (c - v)
// f = of * (c - v) / c = of * (1 - v / c)
Freq(rest_freq.0 * (1.0 - velocity / SPEED_OF_LIGHT))
}
}
impl From<Wavelength> for Freq {
fn from(lambda: Wavelength) -> Self {
Freq(SPEED_OF_LIGHT / lambda.0)
}
}
impl SpectralUnit for Freq {}
impl SpectralUnit for Wavelength {}
impl SpectralUnit for Velocity {}

View File

@@ -223,7 +223,7 @@ impl Manager {
pub fn get_mut_catalog(&mut self, name: &str) -> Result<&mut Catalog, Error> {
self.catalogs.get_mut(name).ok_or(Error::CatalogNotPresent {
message: format!("{name} catalog is not present!"),
message: format!("{} catalog is not present!", name),
})
}

View File

@@ -198,15 +198,14 @@ impl ProjetedGrid {
if self.enabled {
let fov = camera.get_field_of_view();
let bbox = fov.get_bounding_box();
//let max_dim_px = camera.get_width().max(camera.get_height()) as f64;
//let step_line_px = max_dim_px * 0.15;
let aspect = camera.get_aspect() as f64;
let max_dim_px = camera.get_width().max(camera.get_height()) as f64;
let step_line_px = max_dim_px * 0.2;
// update meridians
self.meridians = {
// Select the good step with a binary search
let step_lon_precised = bbox.get_lon_size() * 0.15;
let step_lon_precised =
bbox.get_lon_size() * step_line_px / (camera.get_width() as f64);
let step_lon = select_fixed_step(step_lon_precised);
let decimal_lon_prec = step_lon.to_degrees().log10().abs().ceil() as u8;
@@ -236,7 +235,8 @@ impl ProjetedGrid {
};
self.parallels = {
let step_lat_precised = aspect * bbox.get_lat_size() * 0.15;
let step_lat_precised =
bbox.get_lat_size() * step_line_px / (camera.get_height() as f64);
let step_lat = select_fixed_step(step_lat_precised);
let decimal_lat_prec = step_lat.to_degrees().log10().abs().ceil() as u8;
@@ -349,7 +349,7 @@ const GRID_STEPS: &[f64] = &[
0.08726647,
0.17453293,
0.34906585,
std::f64::consts::FRAC_PI_6,
std::f64::consts::FRAC_PI_4,
];
fn select_fixed_step(fov: f64) -> f64 {

View File

@@ -1,52 +1,50 @@
use al_api::hips::{DataproductType, ImageExt};
use al_api::hips::ImageExt;
use crate::math::spectra::Freq;
use al_core::image::format::ImageFormatType;
use al_core::texture::format::PixelType;
use al_core::image::format::{ChannelType, ImageFormatType};
use web_sys::{RequestCredentials, RequestMode};
#[derive(Debug)]
pub struct HiPSConfig {
pub root_url: String,
// HiPS image format
// TODO: Make that independant of the HiPS but of the ImageFormat
// Size of the tiles
pub tile_size: i32,
// The size of the texture images
tile_size: i32,
// Number of slices for HiPS cubes
pub cube_depth: Option<u32>,
/// Max depth of the current HiPS tiles
pub max_depth_tile: u8,
/// Min depth of the current HiPS tiles
min_depth_tile: u8,
/// Max depth in the frequency axis (HiPS3D only)
pub max_depth_freq: Option<u8>,
// the number of slices for cubes
cube_depth: Option<u32>,
/// Start of spectral coordinates (in meters)
pub em_min: Option<Freq>,
/// End of spectral coordinates (in meters)
pub em_max: Option<Freq>,
// For HiPS3D
pub tile_depth: Option<u8>,
// Max depth of the current HiPS tiles
max_depth_tile: u8,
pub is_allsky: bool,
// TODO: store this values in the ImageSurvey
// These are proper to the survey (FITS one) and not
// to a specific survey color
pub fits_metadata: bool,
pub scale: f32,
pub offset: f32,
pub blank: f32,
pub tex_storing_integers: bool,
pub tex_storing_fits: bool,
pub tex_storing_unsigned_int: bool,
pub frame: CooSystem,
// For FITS HiPSes
pub bitpix: Option<i32>,
format: ImageFormatType,
pub dataproduct_type: DataproductType,
//dataproduct_subtype: Option<Vec<String>>,
//colored: bool,
pub creator_did: String,
pub request_credentials: RequestCredentials,
pub request_mode: RequestMode,
}
use crate::{math::spectra::Wavelength, HiPSProperties};
use crate::HiPSProperties;
use al_api::coo_system::CooSystem;
use wasm_bindgen::JsValue;
@@ -70,7 +68,12 @@ impl HiPSConfig {
// Determine the size of the texture to copy
// it cannot be > to 512x512px
let _fmt = properties.get_formats();
let bitpix = properties.get_bitpix();
let mut tex_storing_unsigned_int = false;
let mut tex_storing_integers = false;
let mut tex_storing_fits = false;
if !properties.get_formats().contains(&img_ext) {
return Err(js_sys::Error::new("HiPS format not available").into());
@@ -80,19 +83,45 @@ impl HiPSConfig {
ImageExt::Fits => {
// Check the bitpix to determine the internal format of the tiles
if let Some(bitpix) = bitpix {
let fmt = (match bitpix {
8 => Ok(PixelType::R8U),
16 => Ok(PixelType::R16I),
32 => Ok(PixelType::R32I),
-32 => Ok(PixelType::R32F),
-64 => Ok(PixelType::R32F),
64 => Ok(PixelType::R32I),
let channel = (match bitpix {
#[cfg(feature = "webgl2")]
8 => {
tex_storing_fits = true;
tex_storing_unsigned_int = true;
Ok(ChannelType::R8UI)
}
#[cfg(feature = "webgl2")]
16 => {
tex_storing_fits = true;
tex_storing_integers = true;
Ok(ChannelType::R16I)
}
#[cfg(feature = "webgl2")]
32 => {
tex_storing_fits = true;
tex_storing_integers = true;
Ok(ChannelType::R32I)
}
-32 => {
tex_storing_fits = true;
tex_storing_integers = false;
Ok(ChannelType::R32F)
}
-64 => {
tex_storing_fits = true;
tex_storing_integers = false;
//Err(JsValue::from_str("f64 FITS files not supported"))
Ok(ChannelType::R64F)
}
_ => Err(JsValue::from_str(
"Fits tiles exists but the BITPIX is not correct in the property file",
)),
})?;
Ok(ImageFormatType { ext: img_ext, fmt })
Ok(ImageFormatType {
ext: img_ext,
channel,
})
} else {
Err(JsValue::from_str(
"Fits tiles exists but the BITPIX is not found",
@@ -101,11 +130,11 @@ impl HiPSConfig {
}
ImageExt::Png | ImageExt::Webp => Ok(ImageFormatType {
ext: img_ext,
fmt: PixelType::RGBA8U,
channel: ChannelType::RGBA8U,
}),
ImageExt::Jpeg => Ok(ImageFormatType {
ext: img_ext,
fmt: PixelType::RGB8U,
channel: ChannelType::RGB8U,
}),
}?;
@@ -130,19 +159,6 @@ impl HiPSConfig {
_ => RequestMode::Cors,
};
let dataproduct_type = properties.get_dataproduct_type().ok_or(JsValue::from_str(
"dataproduct_type keyword is required in the HiPS properties file",
))?;
let max_depth_freq = properties.get_hips_order_freq();
let tile_depth = properties.get_hips_tile_depth();
let em_min: Option<Freq> = properties
.get_em_max()
.map(|lambda| Wavelength(lambda as f64).into());
let em_max: Option<Freq> = properties
.get_em_min()
.map(|lambda| Wavelength(lambda as f64).into());
let hips_config = HiPSConfig {
creator_did,
// HiPS name
@@ -152,23 +168,24 @@ impl HiPSConfig {
is_allsky,
// HiPSCube
fits_metadata: false,
scale: 1.0,
offset: 0.0,
blank: -1.0, // by default, set it to -1
tex_storing_fits,
tex_storing_integers,
tex_storing_unsigned_int,
// the number of slices in a cube
cube_depth,
em_min,
em_max,
// HiPS3D
tile_depth,
max_depth_freq,
frame,
bitpix,
format,
tile_size,
request_credentials,
request_mode,
dataproduct_type,
};
Ok(hips_config)
@@ -179,33 +196,66 @@ impl HiPSConfig {
ImageExt::Fits => {
// Check the bitpix to determine the internal format of the tiles
if let Some(bitpix) = self.bitpix {
let fmt = (match bitpix {
8 => Ok(PixelType::R8U),
16 => Ok(PixelType::R16I),
32 => Ok(PixelType::R32I),
64 => Ok(PixelType::R32I),
-32 => Ok(PixelType::R32F),
-64 => Ok(PixelType::R32F),
let channel = (match bitpix {
#[cfg(feature = "webgl2")]
8 => {
self.tex_storing_fits = true;
self.tex_storing_unsigned_int = true;
Ok(ChannelType::R8UI)
}
#[cfg(feature = "webgl2")]
16 => {
self.tex_storing_fits = true;
self.tex_storing_integers = true;
Ok(ChannelType::R16I)
}
#[cfg(feature = "webgl2")]
32 => {
self.tex_storing_fits = true;
self.tex_storing_integers = true;
Ok(ChannelType::R32I)
}
-32 => {
self.tex_storing_fits = true;
self.tex_storing_integers = false;
Ok(ChannelType::R32F)
}
-64 => {
self.tex_storing_fits = true;
self.tex_storing_integers = false;
//Err(JsValue::from_str("f64 FITS files not supported"))
Ok(ChannelType::R64F)
}
_ => Err(JsValue::from_str(
"Fits tiles exists but the BITPIX is not correct in the property file",
)),
})?;
Ok(ImageFormatType { ext, fmt })
Ok(ImageFormatType { ext, channel })
} else {
Err(JsValue::from_str(
"Fits tiles exists but the BITPIX is not found",
))
}
}
ImageExt::Png | ImageExt::Webp => Ok(ImageFormatType {
ext,
fmt: PixelType::RGBA8U,
}),
ImageExt::Jpeg => Ok(ImageFormatType {
ext,
fmt: PixelType::RGB8U,
}),
ImageExt::Png | ImageExt::Webp => {
self.tex_storing_fits = false;
self.tex_storing_unsigned_int = false;
self.tex_storing_integers = false;
Ok(ImageFormatType {
ext,
channel: ChannelType::RGBA8U,
})
}
ImageExt::Jpeg => {
self.tex_storing_fits = false;
self.tex_storing_unsigned_int = false;
self.tex_storing_integers = false;
Ok(ImageFormatType {
ext,
channel: ChannelType::RGB8U,
})
}
}?;
self.format = format;
@@ -223,11 +273,18 @@ impl HiPSConfig {
self.root_url = root_url;
}
#[inline(always)]
pub fn get_cube_depth(&self) -> Option<u32> {
self.cube_depth
}
#[inline(always)]
pub fn set_fits_metadata(&mut self, bscale: f32, bzero: f32, blank: f32) {
self.scale = bscale;
self.offset = bzero;
self.blank = blank;
self.fits_metadata = true;
}
#[inline(always)]
pub fn allsky_tile_size(&self) -> i32 {
(self.get_tile_size() << 3).min(512)
@@ -284,7 +341,12 @@ use al_core::shader::{SendUniforms, ShaderBound};
impl SendUniforms for HiPSConfig {
fn attach_uniforms<'a>(&self, shader: &'a ShaderBound<'a>) -> &'a ShaderBound<'a> {
// Send max depth
shader.attach_uniform("max_depth", &(self.max_depth_tile as i32));
shader
.attach_uniform("max_depth", &(self.max_depth_tile as i32))
.attach_uniform("tex_storing_fits", &self.tex_storing_fits)
.attach_uniform("scale", &self.scale)
.attach_uniform("offset", &self.offset)
.attach_uniform("blank", &self.blank);
shader
}

View File

@@ -1,22 +1,24 @@
use crate::renderable::hips::d2::texture::HpxTex;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::collections::HashMap;
use al_core::texture::format::PixelType;
use al_core::image::format::ChannelType;
use crate::downloader::request::allsky::AllskyRequest;
use crate::renderable::hips::HpxTile;
use cgmath::Vector3;
use al_api::hips::ImageExt;
use al_core::webgl_ctx::WebGlRenderingCtx;
use al_core::image::format::{R16I, R32F, R32I, R64F, R8UI, RGB8U, RGBA8U};
use al_core::image::Image;
use al_core::shader::{SendUniforms, ShaderBound};
use al_core::texture::format::{R16I, R32F, R32I, R8U, RGB8U, RGBA8U};
use al_core::Texture2DArray;
use al_core::WebGlContext;
use super::texture::HpxTexUniforms;
use super::texture::{HpxTexture2D, HpxTexture2DUniforms};
use crate::downloader::request::allsky::Allsky;
use crate::healpix::cell::HEALPixCell;
use crate::healpix::cell::NUM_HPX_TILES_DEPTH_ZERO;
use crate::renderable::hips::config::HiPSConfig;
@@ -24,19 +26,116 @@ use crate::time::Time;
use crate::Abort;
use crate::JsValue;
use super::super::tile_heap::{Tile, TileHeap};
#[derive(Clone, Debug)]
pub struct TextureCellItem {
cell: HEALPixCell,
time_request: Time,
}
impl TextureCellItem {
fn is_root(&self) -> bool {
self.cell.is_root()
}
}
impl PartialEq for TextureCellItem {
fn eq(&self, other: &Self) -> bool {
self.cell == other.cell
}
}
impl Eq for TextureCellItem {}
// Ordering based on the time the tile has been requested
impl PartialOrd for TextureCellItem {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for TextureCellItem {
fn cmp(&self, other: &Self) -> Ordering {
other
.time_request
.partial_cmp(&self.time_request)
.unwrap_abort()
}
}
impl From<HpxTexture2D> for TextureCellItem {
fn from(texture: HpxTexture2D) -> Self {
let time_request = texture.time_request();
let cell = *texture.cell();
Self { cell, time_request }
}
}
impl From<&HpxTexture2D> for TextureCellItem {
fn from(texture: &HpxTexture2D) -> Self {
let time_request = texture.time_request();
let cell = *texture.cell();
Self { cell, time_request }
}
}
impl From<&mut HpxTexture2D> for TextureCellItem {
fn from(texture: &mut HpxTexture2D) -> Self {
let time_request = texture.time_request();
let cell = *texture.cell();
Self { cell, time_request }
}
}
struct HEALPixCellHeap(BinaryHeap<TextureCellItem>);
impl HEALPixCellHeap {
fn with_capacity(cap: usize) -> Self {
Self(BinaryHeap::with_capacity(cap))
}
fn push<E: Into<TextureCellItem>>(&mut self, item: E) {
let item = item.into();
self.0.push(item);
}
fn update_entry<E: Into<TextureCellItem>>(&mut self, item: E) {
let item = item.into();
self.0 = self
.0
.drain()
// Remove the cell
.filter(|texture_node| texture_node.cell != item.cell)
// Collect to a new binary heap that does not have cell anymore
.collect::<BinaryHeap<_>>();
self.push(item);
}
fn clear(&mut self) {
self.0.clear();
}
fn pop(&mut self) -> Option<TextureCellItem> {
self.0.pop()
}
fn len(&self) -> usize {
self.0.len()
}
}
use crate::renderable::hips::HpxTileBuffer;
// Fixed sized binary heap
pub struct HiPS2DBuffer {
// Some information about the HiPS
config: HiPSConfig,
heap: TileHeap<HEALPixCell>,
textures: HashMap<HEALPixCell, HpxTex>,
heap: HEALPixCellHeap,
num_root_textures_available: u8,
size: usize,
base_textures: [HpxTex; NUM_HPX_TILES_DEPTH_ZERO],
textures: HashMap<HEALPixCell, HpxTexture2D>,
base_textures: [HpxTexture2D; NUM_HPX_TILES_DEPTH_ZERO],
// Array of 2D textures
tile_pixels: Texture2DArray,
@@ -51,7 +150,7 @@ pub struct HiPS2DBuffer {
fn create_hpx_texture_storage(
gl: &WebGlContext,
// The texture image channel definition
channel: PixelType,
channel: ChannelType,
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
num_tiles: i32,
// The size of the tile
@@ -83,50 +182,60 @@ fn create_hpx_texture_storage(
),
];
match channel {
PixelType::RGBA8U => Texture2DArray::create_empty::<RGBA8U>(
ChannelType::RGBA8U => Texture2DArray::create_empty::<RGBA8U>(
gl, tile_size, tile_size,
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
num_tiles, tex_params,
),
PixelType::RGB8U => Texture2DArray::create_empty::<RGB8U>(
ChannelType::RGB8U => Texture2DArray::create_empty::<RGB8U>(
gl, tile_size, tile_size,
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
num_tiles, tex_params,
),
PixelType::R32F => Texture2DArray::create_empty::<R32F>(
ChannelType::R32F => Texture2DArray::create_empty::<R32F>(
gl, tile_size, tile_size,
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
num_tiles, tex_params,
),
PixelType::R8U => Texture2DArray::create_empty::<R8U>(
#[cfg(feature = "webgl2")]
ChannelType::R8UI => Texture2DArray::create_empty::<R8UI>(
gl, tile_size, tile_size,
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
num_tiles, tex_params,
),
PixelType::R16I => Texture2DArray::create_empty::<R16I>(
#[cfg(feature = "webgl2")]
ChannelType::R16I => Texture2DArray::create_empty::<R16I>(
gl, tile_size, tile_size,
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
num_tiles, tex_params,
),
PixelType::R32I => Texture2DArray::create_empty::<R32I>(
#[cfg(feature = "webgl2")]
ChannelType::R32I => Texture2DArray::create_empty::<R32I>(
gl, tile_size, tile_size,
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
num_tiles, tex_params,
),
#[cfg(feature = "webgl2")]
ChannelType::R64F => Texture2DArray::create_empty::<R64F>(
gl, tile_size, tile_size,
// 256 is a consensus for targetting the maximum GPU architectures. We create a 128 slices to optimize performance
num_tiles, tex_params,
),
_ => unimplemented!(),
}
}
impl HiPS2DBuffer {
pub fn push_allsky(&mut self, allsky: AllskyRequest) -> Result<(), JsValue> {
let AllskyRequest { request, .. } = allsky;
pub fn push_allsky(&mut self, allsky: Allsky) -> Result<(), JsValue> {
let Allsky {
image, time_req, ..
} = allsky;
{
let mutex_locked = request.data.borrow();
let mutex_locked = image.borrow();
let images = mutex_locked.as_ref().unwrap_abort();
for (idx, image) in images.iter().enumerate() {
self.push(&HEALPixCell(0, idx as u64), image, request.time_request)?;
self.push(&HEALPixCell(0, idx as u64), image, time_req)?;
}
}
@@ -149,6 +258,14 @@ impl HiPS2DBuffer {
}
}
fn is_heap_full(&self) -> bool {
// Check that there are no more than num_textures
// textures in the buffer
let num_textures_heap = self.heap.len();
num_textures_heap == self.size
}
// Update the priority of the texture containing the tile
// It must be ensured that the tile is already contained in the buffer
pub fn update_priority(&mut self, cell: &HEALPixCell /*, new_fov_cell: bool*/) {
@@ -161,7 +278,7 @@ impl HiPS2DBuffer {
let texture = self
.textures
.get(cell)
.get_mut(cell)
.expect("Texture cell has not been found while the buffer contains one of its tile!");
// Reset the time the tile has been received if it is a new cell present in the fov
//if new_fov_cell {
@@ -174,8 +291,8 @@ impl HiPS2DBuffer {
// But other textures can be removed thanks to the heap
// data-structure. We have to update the time_request of the texture
// and push it again in the heap to update its position.
let mut tex_cell_item: Tile<HEALPixCell> = texture.into();
tex_cell_item.reset_time();
let mut tex_cell_item: TextureCellItem = texture.into();
tex_cell_item.time_request = Time::now();
self.heap.update_entry(tex_cell_item);
}
@@ -222,14 +339,14 @@ impl HiPS2DBuffer {
if !self.contains_tile(cell) {
// The texture is not among the essential ones
// (i.e. is not a root texture)
let mut texture = if self.heap.is_full() {
let mut texture = if self.is_heap_full() {
// Pop the oldest requested texture
let oldest_texture = self.heap.pop().unwrap_abort();
// Ensure this is not a base texture
debug_assert!(!oldest_texture.is_root());
// Remove it from the textures HashMap
let mut texture = self.textures.remove(oldest_texture.cell()).expect(
let mut texture = self.textures.remove(&oldest_texture.cell).expect(
"Texture (oldest one) has not been found in the buffer of textures",
);
texture.replace(cell, time_request);
@@ -238,7 +355,7 @@ impl HiPS2DBuffer {
} else {
let idx = NUM_HPX_TILES_DEPTH_ZERO + self.heap.len();
HpxTex::new(cell, idx as i32, time_request)
HpxTexture2D::new(cell, idx as i32, time_request)
};
texture.copy_to_gpu(
@@ -263,8 +380,6 @@ impl HiPS2DBuffer {
cell: &HEALPixCell,
dx: f64,
dy: f64,
scale: f32,
offset: f32,
) -> Result<JsValue, JsValue> {
let value = if let Some(tile) = self.get(cell) {
// Index of the texture in the total set of textures
@@ -280,27 +395,28 @@ impl HiPS2DBuffer {
tile_idx,
);
match self.config.get_format().get_pixel_format() {
PixelType::RGB8U | PixelType::RGBA8U => self
.tile_pixels
.read_pixel(pos_tex.x, pos_tex.y, pos_tex.z)?,
_ => {
let uvy = 1.0 - (pos_tex.y as f32 / tile_size);
pos_tex.y = (uvy * tile_size) as i32;
// Offset in the slice in pixels
if self.config.tex_storing_fits {
let uvy = 1.0 - (pos_tex.y as f32 / tile_size);
let f64_v = self
.tile_pixels
.read_pixel(pos_tex.x, pos_tex.y, pos_tex.z)?
.as_f64()
.ok_or("Error unwraping the pixel read value.")?;
pos_tex.y = (uvy * tile_size) as i32;
}
// 1 channel
// scale the value
let scale = scale as f64;
let offset = offset as f64;
let value = self
.tile_pixels
.read_pixel(pos_tex.x, pos_tex.y, pos_tex.z)?;
JsValue::from_f64(f64_v * scale + offset)
}
if self.config.tex_storing_fits {
// scale the value
let f64_v = value
.as_f64()
.ok_or("Error unwraping the pixel read value.")?;
let scale = self.config.scale as f64;
let offset = self.config.offset as f64;
JsValue::from_f64(f64_v * scale + offset)
} else {
value
}
} else {
JsValue::null()
@@ -312,26 +428,10 @@ impl HiPS2DBuffer {
pub fn render_allsky(&mut self, flag: bool) {
self.allsky_rendering = flag;
}
// Get the nearest parent tile found in the CPU buffer
pub fn get_nearest_parent(&self, cell: &HEALPixCell) -> Option<HEALPixCell> {
let mut parent_cell = cell.parent();
while !self.contains(&parent_cell) && !parent_cell.is_root() {
parent_cell = parent_cell.parent();
}
if self.contains(&parent_cell) {
Some(parent_cell)
} else {
None
}
}
}
impl HpxTileBuffer for HiPS2DBuffer {
type T = HpxTex;
type C = HEALPixCell;
type T = HpxTexture2D;
fn new(gl: &WebGlContext, config: HiPSConfig) -> Result<Self, JsValue> {
let size = 128 - NUM_HPX_TILES_DEPTH_ZERO;
@@ -339,25 +439,25 @@ impl HpxTileBuffer for HiPS2DBuffer {
// Ensures there is at least space for the 12
// root textures
//debug_assert!(size >= NUM_HPX_TILES_DEPTH_ZERO);
let heap = TileHeap::with_capacity(size);
let heap = HEALPixCellHeap::with_capacity(size);
let textures = HashMap::with_capacity(size);
let now = Time::now();
let base_textures = [
HpxTex::new(&HEALPixCell(0, 0), 0, now),
HpxTex::new(&HEALPixCell(0, 1), 1, now),
HpxTex::new(&HEALPixCell(0, 2), 2, now),
HpxTex::new(&HEALPixCell(0, 3), 3, now),
HpxTex::new(&HEALPixCell(0, 4), 4, now),
HpxTex::new(&HEALPixCell(0, 5), 5, now),
HpxTex::new(&HEALPixCell(0, 6), 6, now),
HpxTex::new(&HEALPixCell(0, 7), 7, now),
HpxTex::new(&HEALPixCell(0, 8), 8, now),
HpxTex::new(&HEALPixCell(0, 9), 9, now),
HpxTex::new(&HEALPixCell(0, 10), 10, now),
HpxTex::new(&HEALPixCell(0, 11), 11, now),
HpxTexture2D::new(&HEALPixCell(0, 0), 0, now),
HpxTexture2D::new(&HEALPixCell(0, 1), 1, now),
HpxTexture2D::new(&HEALPixCell(0, 2), 2, now),
HpxTexture2D::new(&HEALPixCell(0, 3), 3, now),
HpxTexture2D::new(&HEALPixCell(0, 4), 4, now),
HpxTexture2D::new(&HEALPixCell(0, 5), 5, now),
HpxTexture2D::new(&HEALPixCell(0, 6), 6, now),
HpxTexture2D::new(&HEALPixCell(0, 7), 7, now),
HpxTexture2D::new(&HEALPixCell(0, 8), 8, now),
HpxTexture2D::new(&HEALPixCell(0, 9), 9, now),
HpxTexture2D::new(&HEALPixCell(0, 10), 10, now),
HpxTexture2D::new(&HEALPixCell(0, 11), 11, now),
];
let channel = config.get_format().get_pixel_format();
let channel = config.get_format().get_channel();
let tile_size = config.get_tile_size();
let tile_pixels = create_hpx_texture_storage(gl, channel, 128, tile_size)?;
@@ -374,6 +474,7 @@ impl HpxTileBuffer for HiPS2DBuffer {
config,
heap,
size,
num_root_textures_available,
textures,
base_textures,
@@ -389,7 +490,7 @@ impl HpxTileBuffer for HiPS2DBuffer {
fn set_image_ext(&mut self, gl: &WebGlContext, ext: ImageExt) -> Result<(), JsValue> {
self.config.set_image_ext(ext)?;
let channel = self.config.get_format().get_pixel_format();
let channel = self.config.get_format().get_channel();
let tile_size = self.config.get_tile_size();
self.tile_pixels = create_hpx_texture_storage(gl, channel, 128, tile_size)?;
@@ -398,18 +499,18 @@ impl HpxTileBuffer for HiPS2DBuffer {
let now = Time::now();
self.base_textures = [
HpxTex::new(&HEALPixCell(0, 0), 0, now),
HpxTex::new(&HEALPixCell(0, 1), 1, now),
HpxTex::new(&HEALPixCell(0, 2), 2, now),
HpxTex::new(&HEALPixCell(0, 3), 3, now),
HpxTex::new(&HEALPixCell(0, 4), 4, now),
HpxTex::new(&HEALPixCell(0, 5), 5, now),
HpxTex::new(&HEALPixCell(0, 6), 6, now),
HpxTex::new(&HEALPixCell(0, 7), 7, now),
HpxTex::new(&HEALPixCell(0, 8), 8, now),
HpxTex::new(&HEALPixCell(0, 9), 9, now),
HpxTex::new(&HEALPixCell(0, 10), 10, now),
HpxTex::new(&HEALPixCell(0, 11), 11, now),
HpxTexture2D::new(&HEALPixCell(0, 0), 0, now),
HpxTexture2D::new(&HEALPixCell(0, 1), 1, now),
HpxTexture2D::new(&HEALPixCell(0, 2), 2, now),
HpxTexture2D::new(&HEALPixCell(0, 3), 3, now),
HpxTexture2D::new(&HEALPixCell(0, 4), 4, now),
HpxTexture2D::new(&HEALPixCell(0, 5), 5, now),
HpxTexture2D::new(&HEALPixCell(0, 6), 6, now),
HpxTexture2D::new(&HEALPixCell(0, 7), 7, now),
HpxTexture2D::new(&HEALPixCell(0, 8), 8, now),
HpxTexture2D::new(&HEALPixCell(0, 9), 9, now),
HpxTexture2D::new(&HEALPixCell(0, 10), 10, now),
HpxTexture2D::new(&HEALPixCell(0, 11), 11, now),
];
self.heap.clear();
@@ -433,7 +534,7 @@ impl HpxTileBuffer for HiPS2DBuffer {
// Tell if a texture is available meaning all its sub tiles
// must have been written for the GPU
fn contains(&self, cell: &Self::C) -> bool {
fn contains(&self, cell: &HEALPixCell) -> bool {
if let Some(t) = self.get(cell) {
t.is_on_gpu()
} else {
@@ -442,7 +543,7 @@ impl HpxTileBuffer for HiPS2DBuffer {
}
/// Accessors
fn get(&self, cell: &Self::C) -> Option<&Self::T> {
fn get(&self, cell: &HEALPixCell) -> Option<&Self::T> {
if cell.is_root() {
let HEALPixCell(_, idx) = cell;
Some(&self.base_textures[*idx as usize])
@@ -476,7 +577,7 @@ impl SendUniforms for HiPS2DBuffer {
let cell = HEALPixCell(0, idx as u64);
let texture = self.get(&cell).unwrap();
let texture_uniforms = HpxTexUniforms::new(texture, idx as i32);
let texture_uniforms = HpxTexture2DUniforms::new(texture, idx as i32);
shader.attach_uniforms_from(&texture_uniforms);
}

View File

@@ -2,22 +2,17 @@ pub mod buffer;
pub mod texture;
use crate::app::BLENDING_ANIM_DURATION;
use crate::browser_support::BrowserFeaturesSupport;
use crate::downloader::query;
use crate::downloader::query::CellDesc;
use crate::downloader::request::allsky::AllskyRequest;
use crate::math::angle::ToAngle;
use crate::tile_fetcher::TileFetcherQueue;
use crate::renderable::hips::HpxTile;
use al_api::hips::ImageExt;
use al_api::hips::ImageMetadata;
use al_core::colormap::Colormap;
use al_core::colormap::Colormaps;
use al_core::texture::format::PixelType;
use al_core::image::format::ChannelType;
use cgmath::Vector2;
use cgmath::Vector3;
use crate::renderable::hips::FitsParams;
use al_core::image::Image;
use al_core::shader::Shader;
@@ -35,7 +30,8 @@ use crate::camera::CameraViewPort;
use crate::shader::ShaderManager;
use crate::utils;
use crate::healpix::{cell::HEALPixCell, moc::SpaceMoc};
use crate::downloader::request::allsky::Allsky;
use crate::healpix::{cell::HEALPixCell, coverage::HEALPixCoverage};
use crate::time::Time;
use super::config::HiPSConfig;
@@ -46,7 +42,7 @@ use std::collections::HashSet;
// to not be too much skewed
use buffer::HiPS2DBuffer;
use texture::HpxTex;
use texture::HpxTexture2D;
use super::raytracing::RayTracer;
use super::uv::{TileCorner, TileUVW};
@@ -65,21 +61,13 @@ pub struct HpxDrawData<'a> {
impl<'a> HpxDrawData<'a> {
fn from_texture(
starting_texture: &HpxTex,
ending_texture: &HpxTex,
starting_texture: &HpxTexture2D,
ending_texture: &HpxTexture2D,
cell: &'a HEALPixCell,
) -> Self {
let uv_0 = TileUVW::new(
cell,
&Some(starting_texture.cell),
starting_texture.idx() as f32,
);
let uv_1 = TileUVW::new(
cell,
&Some(ending_texture.cell),
ending_texture.idx() as f32,
);
let start_time = ending_texture.start_time.unwrap_or(Time::now()).as_millis();
let uv_0 = TileUVW::new(cell, starting_texture);
let uv_1 = TileUVW::new(cell, ending_texture);
let start_time = ending_texture.start_time().as_millis();
Self {
uv_0,
@@ -109,49 +97,43 @@ pub fn get_raster_shader<'a>(
shaders: &'a mut ShaderManager,
config: &HiPSConfig,
) -> Result<&'a Shader, JsValue> {
match config.get_format().get_pixel_format() {
PixelType::R8U => crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_u8.frag",
),
PixelType::R16I => crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_i16.frag",
),
PixelType::R32I => crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_i32.frag",
),
PixelType::R32F => crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_f32.frag",
),
// color case
_ => {
if cmap.label() == "native" {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_rgba.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_rgba2cmap.frag",
)
}
if config.get_format().is_colored() {
if cmap.label() == "native" {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_color.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_color_to_colormap.frag",
)
}
} else if config.tex_storing_unsigned_int {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_grayscale_to_colormap_u.frag",
)
} else if config.tex_storing_integers {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_grayscale_to_colormap_i.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_rasterizer_raster.vert",
"hips_rasterizer_grayscale_to_colormap.frag",
)
}
}
@@ -161,49 +143,44 @@ pub fn get_raytracer_shader<'a>(
shaders: &'a mut ShaderManager,
config: &HiPSConfig,
) -> Result<&'a Shader, JsValue> {
match config.get_format().get_pixel_format() {
PixelType::R8U => crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_u8.frag",
),
PixelType::R16I => crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_i16.frag",
),
PixelType::R32I => crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_i32.frag",
),
PixelType::R32F => crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_f32.frag",
),
// color case
_ => {
if cmap.label() == "native" {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_rgba.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_rgba2cmap.frag",
)
}
//let colored_hips = config.is_colored();
if config.get_format().is_colored() {
if cmap.label() == "native" {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_color.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_color_to_colormap.frag",
)
}
} else if config.tex_storing_unsigned_int {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_grayscale_to_colormap_u.frag",
)
} else if config.tex_storing_integers {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_grayscale_to_colormap_i.frag",
)
} else {
crate::shader::get_shader(
gl,
shaders,
"hips_raytracer_raytracer.vert",
"hips_raytracer_grayscale_to_colormap.frag",
)
}
}
@@ -236,12 +213,10 @@ pub struct HiPS2D {
vao: VertexArrayObject,
gl: WebGlContext,
moc: Option<SpaceMoc>,
footprint_moc: Option<HEALPixCoverage>,
// A buffer storing the cells in the view
hpx_cells_in_view: Vec<HEALPixCell>,
pub(crate) fits_params: Option<FitsParams>,
}
use super::HpxTileBuffer;
@@ -302,7 +277,7 @@ impl HiPS2D {
let buffer = HiPS2DBuffer::new(gl, config)?;
let gl = gl.clone();
let moc = None;
let footprint_moc = None;
let hpx_cells_in_view = vec![];
// request the allsky texture
Ok(Self {
@@ -314,8 +289,6 @@ impl HiPS2D {
gl,
fits_params: None,
position,
uv_start,
uv_end,
@@ -323,17 +296,15 @@ impl HiPS2D {
idx_vertices,
moc,
footprint_moc,
hpx_cells_in_view,
})
}
pub fn look_for_new_tiles(
&mut self,
tile_fetcher: &mut TileFetcherQueue,
camera: &CameraViewPort,
browser_features_support: &BrowserFeaturesSupport,
) {
pub fn look_for_new_tiles<'a>(
&'a mut self,
camera: &'a CameraViewPort,
) -> Option<impl Iterator<Item = HEALPixCell> + 'a> {
// do not add tiles if the view is already at depth 0
let cfg = self.get_config();
let depth_tile = camera
@@ -342,66 +313,37 @@ impl HiPS2D {
.max(cfg.get_min_depth_tile());
let survey_frame = cfg.get_frame();
let min_tile_depth = cfg.get_min_depth_tile();
let mut already_considered_tiles = HashSet::new();
let tile_queries_iter = camera
let tile_cells_iter = camera
.get_hpx_cells(depth_tile, survey_frame)
.into_iter()
.filter_map(|tile_cell| {
let make_query = if let Some(moc) = self.moc.as_ref() {
moc.intersects_cell(&tile_cell) && !self.update_priority_tile(&tile_cell)
} else {
!self.update_priority_tile(&tile_cell)
};
.filter(move |tile_cell| {
if already_considered_tiles.contains(tile_cell) {
return false;
}
if make_query {
Some(query::Tile::new(
&tile_cell,
self.get_config(),
browser_features_support,
))
already_considered_tiles.insert(*tile_cell);
if let Some(moc) = self.footprint_moc.as_ref() {
moc.intersects_cell(tile_cell) && !self.update_priority_tile(tile_cell)
} else {
None
!self.update_priority_tile(tile_cell)
}
});
let mut ancestors = HashSet::new();
for tile_query in tile_queries_iter {
match tile_query.cell {
CellDesc::HiPS2D { cell, .. } => {
let tile_cell = cell;
tile_fetcher.append(tile_query);
// check if we are starting aladin lite or not.
// If so we want to retrieve only the tiles in the view and access them
// directly i.e. without blending them with less precised tiles
if tile_fetcher.get_num_tile_fetched() > 0
&& tile_cell.depth() >= min_tile_depth + 3
{
let ancestor_tile_cell = tile_cell.ancestor(3);
ancestors.insert(ancestor_tile_cell);
}
}
_ => unreachable!(),
}
}
for ancestor in ancestors {
if !self.update_priority_tile(&ancestor) {
tile_fetcher.append(query::Tile::new(
&ancestor,
self.get_config(),
browser_features_support,
));
}
}
Some(tile_cells_iter)
}
pub fn contains_tile(&self, cell: &HEALPixCell) -> bool {
self.buffer.contains_tile(cell)
}
pub fn get_tile_query(&self, cell: &HEALPixCell) -> query::Tile {
let cfg = self.get_config();
query::Tile::new(cell, None, cfg)
}
pub fn update(&mut self, camera: &mut CameraViewPort, projection: &ProjectionType) {
let raytracing = camera.is_raytracing(projection);
@@ -442,13 +384,13 @@ impl HiPS2D {
}
#[inline]
pub fn set_moc(&mut self, moc: SpaceMoc) {
self.moc = Some(moc);
pub fn set_moc(&mut self, moc: HEALPixCoverage) {
self.footprint_moc = Some(moc);
}
#[inline]
pub fn get_moc(&self) -> Option<&SpaceMoc> {
self.moc.as_ref()
pub fn get_moc(&self) -> Option<&HEALPixCoverage> {
self.footprint_moc.as_ref()
}
pub fn set_image_ext(&mut self, ext: ImageExt) -> Result<(), JsValue> {
@@ -481,13 +423,7 @@ impl HiPS2D {
let (pix, dx, dy) = crate::healpix::utils::hash_with_dxdy(depth, &lonlat);
let tile_cell = HEALPixCell(depth, pix);
let (bscale, bzero) = if let Some(FitsParams { bscale, bzero, .. }) = self.fits_params {
(bscale, bzero)
} else {
(1.0, 0.0)
};
self.buffer.read_pixel(&tile_cell, dx, dy, bscale, bzero)
self.buffer.read_pixel(&tile_cell, dx, dy)
} else {
Err(JsValue::from_str("Out of projection"))
}
@@ -502,7 +438,7 @@ impl HiPS2D {
let cfg = self.buffer.config();
// Get the coo system transformation matrix
let channel = cfg.get_format().get_pixel_format();
let channel = cfg.get_format().get_channel();
// Retrieve the model and inverse model matrix
let mut off_indices = 0;
@@ -520,7 +456,7 @@ impl HiPS2D {
// super::subdivide::num_hpx_subdivision(&self.hpx_cells_in_view[0], camera, projection);
for cell in &self.hpx_cells_in_view {
// filter textures that are not in the moc
let cell_in_cov = if let Some(moc) = self.moc.as_ref() {
let cell_in_cov = if let Some(moc) = self.footprint_moc.as_ref() {
if moc.intersects_cell(cell) {
// Rasterizer does not render tiles that are not in the MOC
// This is not a problem for transparency rendered HiPses (FITS or PNG)
@@ -595,7 +531,7 @@ impl HiPS2D {
} else {
// No ancestor has been found in the buffer to draw.
// We might want to check if the HiPS channel is JPEG to mock a cell that will be drawn in black
if channel == PixelType::RGB8U {
if channel == ChannelType::RGB8U {
Some(HpxDrawData::new(cell))
} else {
None
@@ -604,7 +540,7 @@ impl HiPS2D {
} else {
// No ancestor has been found in the buffer to draw.
// We might want to check if the HiPS channel is JPEG to mock a cell that will be drawn in black
if channel == PixelType::RGB8U {
if channel == ChannelType::RGB8U {
Some(HpxDrawData::new(cell))
} else {
None
@@ -737,7 +673,7 @@ impl HiPS2D {
}
}
pub fn push_tile<I: Image>(
pub fn add_tile<I: Image>(
&mut self,
cell: &HEALPixCell,
image: I,
@@ -746,7 +682,7 @@ impl HiPS2D {
self.buffer.push(cell, image, time_request)
}
pub fn add_allsky(&mut self, allsky: AllskyRequest) -> Result<(), JsValue> {
pub fn add_allsky(&mut self, allsky: Allsky) -> Result<(), JsValue> {
self.buffer.push_allsky(allsky)
}
@@ -818,7 +754,7 @@ impl HiPS2D {
.attach_uniform("current_time", &utils::get_current_time())
.attach_uniform(
"no_tile_color",
&(if config.get_format().get_pixel_format() == PixelType::RGB8U {
&(if config.get_format().get_channel() == ChannelType::RGB8U {
Vector4::new(0.0, 0.0, 0.0, 1.0)
} else {
Vector4::new(0.0, 0.0, 0.0, 0.0)
@@ -827,10 +763,6 @@ impl HiPS2D {
.attach_uniform("opacity", opacity)
.attach_uniforms_from(colormaps);
if let Some(fits_params) = self.fits_params.as_ref() {
shader.attach_uniforms_from(fits_params);
}
raytracer.draw(&shader);
} else {
let v2w = (*camera.get_m2w()) * c.transpose();
@@ -859,13 +791,7 @@ impl HiPS2D {
.attach_uniform("current_time", &utils::get_current_time())
.attach_uniform("opacity", opacity)
.attach_uniform("u_proj", proj)
.attach_uniforms_from(colormaps);
if let Some(fits_params) = self.fits_params.as_ref() {
shader.attach_uniforms_from(fits_params);
}
shader
.attach_uniforms_from(colormaps)
.bind_vertex_array_object_ref(&self.vao)
.draw_elements_with_i32(
WebGl2RenderingContext::TRIANGLES,
@@ -880,14 +806,7 @@ impl HiPS2D {
})?;
//self.gl.disable(WebGl2RenderingContext::BLEND);
Ok(())
}
pub fn set_fits_params(&mut self, bscale: f32, bzero: f32, blank: Option<f32>) {
self.fits_params = Some(FitsParams {
bscale,
bzero,
blank,
});
}
}

View File

@@ -4,8 +4,8 @@ use al_core::image::Image;
use al_core::Texture2DArray;
use wasm_bindgen::JsValue;
pub struct HpxTex {
pub cell: HEALPixCell,
pub struct HpxTexture2D {
tile_cell: HEALPixCell,
// Precomputed uniq number
uniq: i32,
// Position of the texture in the buffer
@@ -13,7 +13,7 @@ pub struct HpxTex {
// The time the texture has been received
// If the texture contains multiple tiles, then the receiving time
// is set when all the tiles have been copied to the buffer
pub start_time: Option<Time>,
start_time: Option<Time>,
// The time request of the texture is the time request
// of the first tile being inserted in it
// It is then only given in the constructor of Texture
@@ -22,21 +22,23 @@ pub struct HpxTex {
// texture. But this is too expensive because at each tile inserted
// in the buffer, one should reevalute the priority of the texture
// in the buffer's binary heap.
pub time_request: Time,
time_request: Time,
// Full flag telling the texture has been filled
copied_to_gpu: bool,
}
impl HpxTex {
use crate::renderable::hips::HpxTile;
impl HpxTexture2D {
pub fn new(cell: &HEALPixCell, idx: i32, time_request: Time) -> Self {
let start_time = None;
let copied_to_gpu = false;
let cell = *cell;
let tile_cell = *cell;
let uniq = cell.uniq();
Self {
cell,
tile_cell,
uniq,
time_request,
idx,
@@ -54,13 +56,13 @@ impl HpxTex {
}
// Setter
pub fn replace(&mut self, cell: &HEALPixCell, time_request: Time) {
pub fn replace(&mut self, tile_cell: &HEALPixCell, time_request: Time) {
// Cancel the tasks copying the tiles contained in the texture
// which have not yet been completed.
//self.clear_tasks_in_progress(config, exec);
self.cell = *cell;
self.uniq = cell.uniq();
self.tile_cell = *tile_cell;
self.uniq = tile_cell.uniq();
self.copied_to_gpu = false;
self.start_time = None;
self.time_request = time_request;
@@ -75,7 +77,8 @@ impl HpxTex {
image: &I,
gpu_texture: &Texture2DArray,
) -> Result<(), JsValue> {
debug_assert!(*cell == self.cell);
debug_assert!(*cell == self.tile_cell);
debug_assert!(!self.copied_to_gpu);
self.copied_to_gpu = true;
self.start_time = Some(Time::now());
@@ -84,8 +87,7 @@ impl HpxTex {
}
}
/*
impl HpxTile for HpxTex {
impl HpxTile for HpxTexture2D {
// Getter
// Returns the current time if the texture is not full
fn start_time(&self) -> Time {
@@ -101,43 +103,44 @@ impl HpxTile for HpxTex {
}
fn cell(&self) -> &HEALPixCell {
&self.cell
&self.tile_cell
}
}*/
}
use std::cmp::Ordering;
impl PartialOrd for HpxTex {
impl PartialOrd for HpxTexture2D {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for HpxTex {
use crate::Abort;
impl Ord for HpxTexture2D {
fn cmp(&self, other: &Self) -> Ordering {
self.uniq.cmp(&other.uniq)
}
}
impl PartialEq for HpxTex {
impl PartialEq for HpxTexture2D {
fn eq(&self, other: &Self) -> bool {
self.uniq == other.uniq
}
}
impl Eq for HpxTex {}
impl Eq for HpxTexture2D {}
pub struct HpxTexUniforms<'a> {
texture: &'a HpxTex,
pub struct HpxTexture2DUniforms<'a> {
texture: &'a HpxTexture2D,
name: String,
}
impl<'a> HpxTexUniforms<'a> {
pub fn new(texture: &'a HpxTex, idx_texture: i32) -> Self {
let name = format!("textures_tiles[{idx_texture}].");
HpxTexUniforms { texture, name }
impl<'a> HpxTexture2DUniforms<'a> {
pub fn new(texture: &'a HpxTexture2D, idx_texture: i32) -> Self {
let name = format!("textures_tiles[{}].", idx_texture);
HpxTexture2DUniforms { texture, name }
}
}
use al_core::shader::{SendUniforms, ShaderBound};
impl SendUniforms for HpxTexUniforms<'_> {
impl SendUniforms for HpxTexture2DUniforms<'_> {
// Info: These uniforms are used for raytracing drawing mode only
fn attach_uniforms<'b>(&self, shader: &'b ShaderBound<'b>) -> &'b ShaderBound<'b> {
shader
@@ -157,7 +160,7 @@ impl SendUniforms for HpxTexUniforms<'_> {
)
.attach_uniform(
&format!("{}{}", self.name, "start_time"),
&self.texture.start_time.unwrap_or(Time::now()),
&self.texture.start_time(),
);
shader

View File

@@ -0,0 +1,197 @@
use std::collections::HashMap;
use al_core::image::Image;
use al_core::WebGlContext;
use super::texture::HpxTexture3D;
use crate::downloader::request::allsky::Allsky;
use crate::healpix::cell::HEALPixCell;
use crate::renderable::hips::config::HiPSConfig;
use crate::renderable::hips::HpxTileBuffer;
use crate::time::Time;
use crate::Abort;
use crate::JsValue;
use al_api::hips::ImageExt;
// Fixed sized binary heap
pub struct HiPS3DBuffer {
// Some information about the HiPS
textures: HashMap<HEALPixCell, HpxTexture3D>,
config: HiPSConfig,
available_tiles_during_frame: bool,
gl: WebGlContext,
}
impl HiPS3DBuffer {
pub fn new(gl: &WebGlContext, config: HiPSConfig) -> Result<Self, JsValue> {
let textures = HashMap::new();
let available_tiles_during_frame = false;
let gl = gl.clone();
Ok(Self {
config,
textures,
available_tiles_during_frame,
gl,
})
}
pub fn push_allsky(&mut self, allsky: Allsky) -> Result<(), JsValue> {
let Allsky {
image,
time_req,
//depth_tile,
channel,
..
} = allsky;
{
let mutex_locked = image.borrow();
let images = mutex_locked.as_ref().unwrap_abort();
for (idx, image) in images.iter().enumerate() {
self.push(
&HEALPixCell(0, idx as u64),
image,
time_req,
channel.map(|c| c as u16).unwrap_or(0),
)?;
}
}
Ok(())
}
pub fn find_nearest_slice(&self, cell: &HEALPixCell, slice: u16) -> Option<u16> {
self.get(cell).and_then(|t| t.find_nearest_slice(slice))
}
// This method pushes a new downloaded tile into the buffer
// It must be ensured that the tile is not already contained into the buffer
pub fn push<I: Image>(
&mut self,
cell: &HEALPixCell,
image: I,
time_request: Time,
slice_idx: u16,
) -> Result<(), JsValue> {
let tex = if let Some(tex) = self.textures.get_mut(cell) {
tex
} else {
self.textures
.insert(*cell, HpxTexture3D::new(*cell, time_request));
self.textures.get_mut(cell).unwrap()
};
// copy to the 3D textured block
tex.append(image, slice_idx, &self.config, &self.gl)?;
self.available_tiles_during_frame = true;
Ok(())
}
// Return if tiles did become available
pub fn reset_available_tiles(&mut self) -> bool {
let available_tiles_during_frame = self.available_tiles_during_frame;
self.available_tiles_during_frame = false;
available_tiles_during_frame
}
// Tell if a texture is available meaning all its sub tiles
// must have been written for the GPU
pub fn contains_tile(&self, texture_cell: &HEALPixCell, slice: u16) -> bool {
self.get(texture_cell)
.is_some_and(|t| t.contains_slice(slice))
}
/// Accessors
pub fn get(&self, cell: &HEALPixCell) -> Option<&HpxTexture3D> {
self.textures.get(cell)
}
pub fn config(&self) -> &HiPSConfig {
&self.config
}
pub fn config_mut(&mut self) -> &mut HiPSConfig {
&mut self.config
}
}
impl HpxTileBuffer for HiPS3DBuffer {
type T = HpxTexture3D;
fn new(gl: &WebGlContext, config: HiPSConfig) -> Result<Self, JsValue> {
let textures = HashMap::new();
let available_tiles_during_frame = false;
let gl = gl.clone();
Ok(Self {
config,
textures,
available_tiles_during_frame,
gl,
})
}
// Return if tiles did become available
fn reset_available_tiles(&mut self) -> bool {
let available_tiles_during_frame = self.available_tiles_during_frame;
self.available_tiles_during_frame = false;
available_tiles_during_frame
}
fn set_image_ext(&mut self, _gl: &WebGlContext, ext: ImageExt) -> Result<(), JsValue> {
self.config.set_image_ext(ext)?;
self.textures.clear();
//self.ready = false;
self.available_tiles_during_frame = true;
Ok(())
}
// Tell if a texture is available meaning all its sub tiles
// must have been written for the GPU
fn contains(&self, cell: &HEALPixCell) -> bool {
self.get(cell).is_some()
}
/// Accessors
fn get(&self, cell: &HEALPixCell) -> Option<&HpxTexture3D> {
self.textures.get(cell)
}
fn config(&self) -> &HiPSConfig {
&self.config
}
fn config_mut(&mut self) -> &mut HiPSConfig {
&mut self.config
}
}
use al_core::shader::SendUniforms;
use al_core::shader::ShaderBound;
impl SendUniforms for HiPS3DBuffer {
// Send only the allsky textures
fn attach_uniforms<'a>(&self, shader: &'a ShaderBound<'a>) -> &'a ShaderBound<'a> {
shader.attach_uniforms_from(&self.config)
}
}
impl Drop for HiPS3DBuffer {
fn drop(&mut self) {
// drop all the 3D block textures
self.textures.clear();
}
}

View File

@@ -1,264 +0,0 @@
use std::collections::HashMap;
use al_core::image::Image;
use al_core::WebGlContext;
use super::super::tile_heap::TileHeap;
use super::texture::HpxFreqTex;
use crate::healpix::cell::HEALPixFreqCell;
use crate::renderable::hips::config::HiPSConfig;
use crate::renderable::hips::HpxTileBuffer;
use crate::time::Time;
use crate::Abort;
use crate::JsValue;
use al_api::hips::ImageExt;
// Fixed sized binary heap
pub struct HiPS3DBuffer {
// Some information about the HiPS
textures: HashMap<HEALPixFreqCell, HpxFreqTex>,
heap: TileHeap<HEALPixFreqCell>,
config: HiPSConfig,
available_tiles_during_frame: bool,
gl: WebGlContext,
}
impl HiPS3DBuffer {
/*pub fn push_allsky(&mut self, allsky: AllskyRequest) -> Result<(), JsValue> {
let AllskyRequest {
request,
//depth_tile,
channel,
..
} = allsky;
{
let mutex_locked = request.data.borrow();
let images = mutex_locked.as_ref().unwrap_abort();
for (idx, image) in images.iter().enumerate() {
self.push(
&HEALPixCell(0, idx as u64),
image,
request.time_request,
channel.map(|c| c as u16).unwrap_or(0),
)?;
}
}
Ok(())
}*/
/*pub fn find_nearest_slice(&self, cell: &HEALPixCell, slice: u16) -> Option<u16> {
self.get(cell).and_then(|t| t.find_nearest_slice(slice))
}*/
fn push_cell(&mut self, cell: &HEALPixFreqCell, time_request: Time) -> Result<(), JsValue> {
// Check if the cell is not yet contain in the buffer
if !self.contains(cell) {
// If not, add create it and add it to the buffer
if self.heap.is_full() {
// Pop the oldest requested texture
let oldest_texture = self.heap.pop().unwrap_abort();
// Remove it from the textures HashMap
self.textures
.remove(oldest_texture.cell())
.expect("Texture (oldest one) has not been found in the buffer of textures");
}
let texture = HpxFreqTex::new(
cell.clone(),
time_request,
self.config.tile_size as u16,
self.config.tile_depth.unwrap_or(32) as u16,
self.config.get_format().get_pixel_format(),
&self.gl,
)?;
// Push it to the buffer
self.heap.push(&texture);
self.textures.insert(cell.clone(), texture);
};
Ok(())
}
// Push a image slice into the buffer
pub fn push_tile_slice<I: Image>(
&mut self,
cell: &HEALPixFreqCell,
image: I,
time_request: Time,
// this slice index inside the cubic cell
slice_idx: u16,
) -> Result<(), JsValue> {
self.push_cell(cell, time_request)?;
let texture = self.textures.get_mut(cell).unwrap_abort();
// And copy the image in that cubic tile
texture.append_tile_slice(image, slice_idx)?;
self.available_tiles_during_frame = true;
Ok(())
}
pub fn push_tile_from_fits(
&mut self,
cell: &HEALPixFreqCell,
raw_bytes: js_sys::Uint8Array,
size: (u32, u32, u32),
time_request: Time,
) -> Result<(), JsValue> {
self.push_cell(cell, time_request)?;
let texture = self.textures.get_mut(cell).unwrap_abort();
// And copy the image in that cubic tile
texture.set_data_from_fits(raw_bytes, size)?;
self.available_tiles_during_frame = true;
Ok(())
}
pub fn push_tile_from_jpeg(
&mut self,
cell: &HEALPixFreqCell,
decoded_bytes: Box<[u8]>,
size: (u32, u32, u32),
time_request: Time,
) -> Result<(), JsValue> {
self.push_cell(cell, time_request)?;
let texture = self.textures.get_mut(cell).unwrap_abort();
// And copy the image in that cubic tile
texture.set_data_from_jpeg(decoded_bytes, size)?;
self.available_tiles_during_frame = true;
Ok(())
}
pub fn push_tile_from_png(
&mut self,
cell: &HEALPixFreqCell,
decoded_bytes: Box<[u8]>,
size: (u32, u32, u32),
time_request: Time,
) -> Result<(), JsValue> {
self.push_cell(cell, time_request)?;
let texture = self.textures.get_mut(cell).unwrap_abort();
// And copy the image in that cubic tile
texture.set_data_from_png(decoded_bytes, size)?;
self.available_tiles_during_frame = true;
Ok(())
}
// Tell if a texture is available meaning all its sub tiles
// must have been written for the GPU
pub fn contains_slice(
&self,
// the cell to check
cell: &HEALPixFreqCell,
// the idx of one slice inside the cube, has to be in [0; 2^(f_order) - 1]
idx_slice: u16,
) -> bool {
self.get(cell).is_some_and(|t| t.contains_slice(idx_slice))
}
// Get the nearest spatial parent found in the buffer
pub fn get_nearest_parent(&self, cell: &HEALPixFreqCell) -> Option<HEALPixFreqCell> {
let mut parent_cell = cell.parent();
while !self.contains(&parent_cell) && !parent_cell.is_hpx_root() {
parent_cell = parent_cell.parent();
}
if self.contains(&parent_cell) {
Some(parent_cell)
} else {
None
}
}
}
impl HpxTileBuffer for HiPS3DBuffer {
type T = HpxFreqTex;
type C = HEALPixFreqCell;
fn new(gl: &WebGlContext, config: HiPSConfig) -> Result<Self, JsValue> {
let textures = HashMap::new();
// Limit the number of cached cubes to 256 so approx 256 MB
let heap = TileHeap::with_capacity(1024);
let available_tiles_during_frame = false;
let gl = gl.clone();
Ok(Self {
config,
textures,
heap,
available_tiles_during_frame,
gl,
})
}
// Return if tiles did become available
fn reset_available_tiles(&mut self) -> bool {
let available_tiles_during_frame = self.available_tiles_during_frame;
self.available_tiles_during_frame = false;
available_tiles_during_frame
}
fn set_image_ext(&mut self, _gl: &WebGlContext, ext: ImageExt) -> Result<(), JsValue> {
self.config.set_image_ext(ext)?;
self.textures.clear();
self.heap.clear();
self.available_tiles_during_frame = true;
Ok(())
}
/// Accessors
fn get(&self, cell: &Self::C) -> Option<&HpxFreqTex> {
self.textures.get(cell)
}
fn contains(&self, cell: &Self::C) -> bool {
self.get(cell).is_some()
}
fn config(&self) -> &HiPSConfig {
&self.config
}
fn config_mut(&mut self) -> &mut HiPSConfig {
&mut self.config
}
}
use al_core::shader::SendUniforms;
use al_core::shader::ShaderBound;
impl SendUniforms for HiPS3DBuffer {
// Send only the allsky textures
fn attach_uniforms<'a>(&self, shader: &'a ShaderBound<'a>) -> &'a ShaderBound<'a> {
shader.attach_uniforms_from(&self.config)
}
}
impl Drop for HiPS3DBuffer {
fn drop(&mut self) {
// drop all the 3D block textures
self.textures.clear();
self.heap.clear();
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,139 +1,23 @@
use crate::time::Time;
use crate::renderable::hips::d2::texture::HpxTexture2D;
use crate::{healpix::cell::HEALPixCell, time::Time};
use crate::renderable::hips::d3::Freq;
use crate::Abort;
use crate::WebGlContext;
use al_core::image::fits::FitsImage;
use al_core::image::raw::ImageBuffer;
use al_core::image::format::{
ChannelType, R16I, R32F, R32I, R64F, R8UI, RGB32F, RGB8U, RGBA32F, RGBA8U,
};
use al_core::image::Image;
use al_core::texture::format::{PixelType, R16I, R32F, R32I, R8U};
use al_core::texture::Texture3D;
use al_core::webgl_ctx::WebGlRenderingCtx;
use cgmath::Vector3;
use fitsrs::hdu::header::Bitpix;
use std::cmp::Ordering;
use std::ops::Range;
use wasm_bindgen::JsValue;
pub enum HpxFreqData {
Fits {
// The raw bytes of the whole cubic FITS file, data big endian
raw_bytes: Box<[u8]>,
// Offset to the data bytes of the cubic tile
data_byte_offset: Range<usize>,
// Number of bytes per pixel (deduced from the bitpix)
bitpix: Bitpix,
// Triming offset indices when reading the data
trim: (u32, u32, u32),
// Naxis
naxis: (u32, u32, u32),
// Scaling value
bscale: f32,
// Offset value
bzero: f32,
// The real size of the cube
size: (u32, u32, u32),
},
Jpeg {
data: Box<[u8]>,
size: (u32, u32, u32),
},
Png {
data: Box<[u8]>,
size: (u32, u32, u32),
},
}
pub enum Pixel {
F32(f32),
I32(i32),
I16(i16),
U8(u8),
}
impl Pixel {
pub fn to_f32(&self) -> f32 {
match *self {
Pixel::F32(v) => v,
Pixel::I16(v) => v as f32,
Pixel::I32(v) => v as f32,
Pixel::U8(v) => v as f32,
}
}
}
impl HpxFreqData {
pub fn read_pixel(&self, x: u32, y: u32, z: u32) -> Option<f32> {
match self {
HpxFreqData::Fits {
raw_bytes,
data_byte_offset,
bitpix,
trim,
naxis,
bscale,
bzero,
size,
} => {
// Do not remember the origin in fits image data is left-down corner
let y = size.1 - y;
let x_in_data = (trim.0..(trim.0 + naxis.0)).contains(&x);
let y_in_data = (trim.1..(trim.1 + naxis.1)).contains(&y);
let z_in_data = (trim.2..(trim.2 + naxis.2)).contains(&z);
if !x_in_data || !y_in_data || !z_in_data {
None
} else {
let x = x - trim.0;
let y = y - trim.1;
let z = z - trim.2;
let data_raw_bytes = &raw_bytes[data_byte_offset.clone()];
let bytes_per_pixel = bitpix.byte_size();
let pixel_bytes_off =
bytes_per_pixel * (x + y * naxis.0 + z * (naxis.0 * naxis.1)) as usize;
let p = &data_raw_bytes[pixel_bytes_off..(pixel_bytes_off + bytes_per_pixel)];
let pixel = match bitpix {
Bitpix::U8 => Pixel::U8(p[0]),
Bitpix::I16 => Pixel::I16(i16::from_be_bytes([p[0], p[1]])),
Bitpix::I32 => Pixel::I32(i32::from_be_bytes([p[0], p[1], p[2], p[3]])),
Bitpix::F32 => Pixel::F32(f32::from_be_bytes([p[0], p[1], p[2], p[3]])),
Bitpix::F64 => Pixel::F32(f64::from_be_bytes([
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
]) as f32),
Bitpix::I64 => Pixel::I32(i64::from_be_bytes([
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
]) as i32),
};
Some(pixel.to_f32() * (*bscale) + (*bzero))
}
}
HpxFreqData::Jpeg { data, size } => {
let pixel_bytes_off = (x + y * size.0 + z * (size.0 * size.1)) as usize;
let p = data[pixel_bytes_off];
Some(p as f32)
}
HpxFreqData::Png { data, size } => {
let pixel_bytes_off = (x + y * size.0 + z * (size.0 * size.1)) as usize;
let p = data[2 * pixel_bytes_off];
Some(p as f32)
}
}
}
}
pub struct HpxFreqTex {
pub cell: HEALPixFreqCell,
pub struct HpxTexture3D {
tile_cell: HEALPixCell,
// Precomputed uniq number
uniq: i32,
// The time the texture has been received
// If the texture contains multiple tiles, then the receiving time
// is set when all the tiles have been copied to the buffer
pub start_time: Option<Time>,
start_time: Option<Time>,
// The time request of the texture is the time request
// of the first tile being inserted in it
// It is then only given in the constructor of Texture
@@ -142,269 +26,281 @@ pub struct HpxFreqTex {
// texture. But this is too expensive because at each tile inserted
// in the buffer, one should reevalute the priority of the texture
// in the buffer's binary heap.
pub time_request: Time,
time_request: Time,
// OLD CODE
// We autorize 512 cubic tiles of size 32 each which allows to store max 16384 slices
//textures: Vec<Option<Texture3D>>,
textures: Vec<Option<Texture3D>>,
// A set of already inserted slices. Each cubic tiles can have 32 slices. The occupancy of the
// slices inside a cubic tile is done with a u32 mask. Limited to 16384 slices
//blocks: [u32; 512],
blocks: [u32; 512],
// sorted index list of 32-length blocks that are not empty
//block_indices: Vec<usize>,
/// The webgl2 3D texture of the cubic tile
pub texture: Texture3D,
data: Option<HpxFreqData>,
// The real image data for accessing the pixel values
//data: ImageType,
/// A bitvector keeping track of the slices that have been inserted into the 3D texture
/// It is limited to a cube depth of 256 (~ to the max texture size).
slice_idx: [u32; 8],
/// Depth of the tile
num_slices: u16,
/// Number of slices copied (concerns only HiPSCube)
num_stored_slices: u16,
block_indices: Vec<usize>,
}
const TEX_PARAMS: &[(u32, u32)] = &[
(
WebGlRenderingCtx::TEXTURE_MIN_FILTER,
WebGlRenderingCtx::NEAREST,
),
(
WebGlRenderingCtx::TEXTURE_MAG_FILTER,
WebGlRenderingCtx::NEAREST,
),
// Prevents s-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_S,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
// Prevents t-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_T,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
// Prevents r-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_R,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
];
use crate::renderable::hips::config::HiPSConfig;
use crate::WebGlContext;
use crate::healpix::cell::HEALPixFreqCell;
impl HpxFreqTex {
pub fn new(
// The cubic tile definition to locate the cube in the sky + spectral axis
cell: HEALPixFreqCell,
// The time the request has been made, i.e. when the tile was needed
time_request: Time,
// The size of the cubis tile
tile_size: u16,
// The depth of the cubic tile. Must be a power of two
num_slices: u16,
// pixel format
pixel_format: PixelType,
// The Gl context
gl: &WebGlContext,
) -> Result<Self, JsValue> {
use crate::renderable::hips::HpxTile;
impl HpxTexture3D {
pub fn new(tile_cell: HEALPixCell, time_request: Time) -> Self {
let start_time = None;
let texture = match pixel_format {
// alpha transparency
PixelType::RGBA8U => Texture3D::create_empty::<R16I>(
gl,
tile_size as i32,
tile_size as i32,
num_slices as i32,
TEX_PARAMS,
),
PixelType::RGB8U => Texture3D::create_empty::<R8U>(
gl,
tile_size as i32,
tile_size as i32,
num_slices as i32,
TEX_PARAMS,
),
PixelType::R8U => Texture3D::create_empty::<R8U>(
gl,
tile_size as i32,
tile_size as i32,
num_slices as i32,
TEX_PARAMS,
),
PixelType::R32F => Texture3D::create_empty::<R32F>(
gl,
tile_size as i32,
tile_size as i32,
num_slices as i32,
TEX_PARAMS,
),
PixelType::R16I => Texture3D::create_empty::<R16I>(
gl,
tile_size as i32,
tile_size as i32,
num_slices as i32,
TEX_PARAMS,
),
PixelType::R32I => Texture3D::create_empty::<R32I>(
gl,
tile_size as i32,
tile_size as i32,
num_slices as i32,
TEX_PARAMS,
),
}?;
let data = None;
let num_stored_slices = 0;
let slice_idx = [0x0; 8];
Ok(Self {
cell,
slice_idx,
let uniq = tile_cell.uniq();
let textures = std::iter::repeat_n(None, 512).collect();
let blocks = [0; 512];
let block_indices = Vec::new();
Self {
tile_cell,
uniq,
time_request,
start_time,
data,
texture,
num_slices,
num_stored_slices,
})
textures,
blocks,
block_indices,
}
}
pub fn set_data_from_fits(
&mut self,
// the tile image of the whole cubic tile
raw_bytes: js_sys::Uint8Array,
// size of the cube
size: (u32, u32, u32),
) -> Result<(), JsValue> {
let raw_bytes = raw_bytes.to_vec().into_boxed_slice();
pub fn find_nearest_slice(&self, slice: u16) -> Option<u16> {
let block_idx = (slice >> 5) as usize;
let (trim1, trim2, trim3, width, height, depth, bitpix, data_byte_offset, bscale, bzero) = {
let fits = FitsImage::from_raw_bytes(&raw_bytes[..])?;
fits[0].insert_into_3d_texture(&self.texture, &Vector3::<i32>::new(0, 0, 0))?;
match self.block_indices.binary_search(&block_idx) {
Ok(_) => {
if self.contains_slice(slice) {
Some(slice)
} else {
// the slice is not present but we know there is one in the block
let block = self.blocks[block_idx];
(
fits[0].trim1,
fits[0].trim2,
fits[0].trim3,
fits[0].width,
fits[0].height,
fits[0].depth,
fits[0].bitpix,
fits[0].data_byte_offset.clone(),
fits[0].bscale,
fits[0].bzero,
)
};
let slice_idx = (slice & 0x1f) as u32;
let trim = (trim1, trim2, trim3);
let naxis = (width, height, depth);
let m2 = if slice_idx == 31 {
0
} else {
0xffffffff >> (slice_idx + 1)
};
let m1 = (!m2) & !(1 << (31 - slice_idx));
self.data = Some(HpxFreqData::Fits {
raw_bytes,
data_byte_offset: data_byte_offset.clone(),
bitpix,
trim,
naxis,
bscale,
bzero,
size,
});
self.num_stored_slices = self.num_slices;
self.start_time = Some(Time::now());
let lb = (block & m1) >> (32 - slice_idx);
let rb = block & m2;
Ok(())
let lb_trailing_zeros = (lb.trailing_zeros() as u16).min(slice_idx as u16);
let rb_leading_zeros = (rb.leading_zeros() - slice_idx - 1) as u16;
let no_more_left_bits = slice_idx - (lb_trailing_zeros as u32) == 0;
let no_more_right_bits = slice_idx + (rb_leading_zeros as u32) == 31;
match (no_more_left_bits, no_more_right_bits) {
(false, false) => {
if lb_trailing_zeros <= rb_leading_zeros {
Some(slice - lb_trailing_zeros - 1)
} else {
Some(slice + rb_leading_zeros + 1)
}
}
(false, true) => {
if lb_trailing_zeros <= rb_leading_zeros {
Some(slice - lb_trailing_zeros - 1)
} else {
// explore next block
if block_idx == self.blocks.len() - 1 {
// no after block
Some(slice - lb_trailing_zeros - 1)
} else {
// get the next block
let next_block = self.blocks[block_idx + 1];
let num_bits_to_next_block =
next_block.leading_zeros() as u16 + rb_leading_zeros;
if num_bits_to_next_block < lb_trailing_zeros {
Some(slice + num_bits_to_next_block + 1)
} else {
Some(slice - lb_trailing_zeros - 1)
}
}
}
}
(true, false) => {
if rb_leading_zeros <= lb_trailing_zeros {
Some(slice + rb_leading_zeros + 1)
} else {
// explore previous block
if block_idx == 0 {
// no after block
Some(slice + rb_leading_zeros + 1)
} else {
// get the next block
let prev_block = self.blocks[block_idx - 1];
let num_bits_from_prev_block =
prev_block.trailing_zeros() as u16 + lb_trailing_zeros;
if num_bits_from_prev_block < rb_leading_zeros {
Some(slice - num_bits_from_prev_block - 1)
} else {
Some(slice + rb_leading_zeros + 1)
}
}
}
}
(true, true) => unreachable!(),
}
}
}
Err(i) => {
let prev_block = if i > 0 {
self.block_indices.get(i - 1)
} else {
None
};
let cur_block = self.block_indices.get(i);
match (prev_block, cur_block) {
(Some(b_idx_1), Some(b_idx_2)) => {
let b1 = self.blocks[*b_idx_1];
let b2 = self.blocks[*b_idx_2];
let b1_tz = b1.trailing_zeros() as usize;
let b2_lz = b2.leading_zeros() as usize;
let slice_b1 = ((*b_idx_1 << 5) + 32 - b1_tz - 1) as u16;
let slice_b2 = ((*b_idx_2 << 5) + b2_lz) as u16;
if slice - slice_b1 <= slice_b2 - slice {
// the nearest slice is in b1
Some(slice_b1)
} else {
// the nearest slice is in b2
Some(slice_b2)
}
}
(None, Some(b_idx_2)) => {
let b2 = self.blocks[*b_idx_2];
let b2_lz = b2.leading_zeros() as usize;
Some(((*b_idx_2 << 5) + b2_lz) as u16)
}
(Some(b_idx_1), None) => {
let b1 = self.blocks[*b_idx_1];
let b1_tz = b1.trailing_zeros() as usize;
Some(((*b_idx_1 << 5) + 32 - b1_tz - 1) as u16)
}
(None, None) => None,
}
}
}
}
pub fn read_pixel(&self, x: u32, y: u32, z: u32) -> Option<f32> {
if let Some(data) = &self.data {
data.read_pixel(x, y, z)
pub fn get_3d_block_from_slice(&self, slice: u16) -> Option<&Texture3D> {
let block_idx = (slice >> 5) as usize;
self.textures[block_idx].as_ref()
}
pub fn extract_2d_slice_texture(&self, slice: u16) -> Option<HpxTexture2D> {
// Find the good sub cube containing the slice
let block_idx = (slice >> 5) as usize;
let slice_idx = (slice & 0x1f) as u8;
// check the texture is there
if self.blocks[block_idx] & (1 << (31 - slice_idx)) != 0 {
Some(HpxTexture2D::new(
&self.tile_cell,
slice_idx as i32,
self.time_request,
))
} else {
None
}
}
pub fn frequencies(&self) -> Vec<f32> {
let delta_depth = self.num_slices.trailing_zeros();
let pixel_depth = self.cell.f_depth + delta_depth as u8;
let h0 = self.cell.f_hash << delta_depth;
let h1 = (self.cell.f_hash + 1) << delta_depth;
(h0..h1)
.map(|hash| Freq::from_hash_with_order(hash, pixel_depth).0 as f32)
.collect()
}
pub fn set_data_from_jpeg(
&mut self,
// the tile image of the whole cubic tile
decoded_bytes: Box<[u8]>,
// size of the cube
size: (u32, u32, u32),
) -> Result<(), JsValue> {
let cubic_tile = ImageBuffer::<R8U>::new(decoded_bytes, size.0, size.1, size.2);
cubic_tile.insert_into_3d_texture(&self.texture, &Vector3::<i32>::new(0, 0, 0))?;
self.data = Some(HpxFreqData::Jpeg {
data: cubic_tile.data,
size,
});
self.num_stored_slices = self.num_slices;
self.start_time = Some(Time::now());
Ok(())
}
pub fn set_data_from_png(
&mut self,
// the tile image of the whole cubic tile
decoded_bytes: Box<[u8]>,
// size of the cube
size: (u32, u32, u32),
) -> Result<(), JsValue> {
let cubic_tile = ImageBuffer::<R16I>::new(decoded_bytes, size.0, size.1, size.2);
cubic_tile.insert_into_3d_texture(&self.texture, &Vector3::<i32>::new(0, 0, 0))?;
self.data = Some(HpxFreqData::Png {
data: cubic_tile.data,
size,
});
self.num_stored_slices = self.num_slices;
self.start_time = Some(Time::now());
Ok(())
}
// Panic if cell is not contained in the texture
// Do nothing if the texture is full
// Return true if the tile is newly added
// Used by HiPS Cubes
pub fn append_tile_slice<I: Image>(
pub fn append<I: Image>(
&mut self,
// the tile image of 1 slice
image: I,
// the slice offset in the cubic tile
offset: u16,
slice: u16,
cfg: &HiPSConfig,
gl: &WebGlContext,
) -> Result<(), JsValue> {
// If there is already something, do not tex sub
let block_idx = (offset >> 5) as usize;
let slice_idx = (offset & 0x1f) as u8;
let block_idx = (slice >> 5) as usize;
if self.slice_idx[block_idx] & (1 << (31 - slice_idx)) == 0 {
image.insert_into_3d_texture(
&self.texture,
&Vector3::<i32>::new(0, 0, slice_idx as i32),
)?;
let texture = if let Some(texture) = self.textures[block_idx].as_ref() {
texture
} else {
let tile_size = cfg.get_tile_size();
let params = &[
(
WebGlRenderingCtx::TEXTURE_MIN_FILTER,
WebGlRenderingCtx::NEAREST,
),
(
WebGlRenderingCtx::TEXTURE_MAG_FILTER,
WebGlRenderingCtx::NEAREST,
),
// Prevents s-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_S,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
// Prevents t-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_T,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
// Prevents r-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_R,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
];
self.slice_idx[block_idx] |= 1 << (31 - slice_idx);
self.num_stored_slices += 1;
let texture = match cfg.get_format().get_channel() {
ChannelType::RGBA32F => {
Texture3D::create_empty::<RGBA32F>(gl, tile_size, tile_size, 32, params)
}
ChannelType::RGB32F => {
Texture3D::create_empty::<RGB32F>(gl, tile_size, tile_size, 32, params)
}
ChannelType::RGBA8U => {
Texture3D::create_empty::<RGBA8U>(gl, tile_size, tile_size, 32, params)
}
ChannelType::RGB8U => {
Texture3D::create_empty::<RGB8U>(gl, tile_size, tile_size, 32, params)
}
ChannelType::R32F => {
Texture3D::create_empty::<R32F>(gl, tile_size, tile_size, 32, params)
}
ChannelType::R64F => {
Texture3D::create_empty::<R64F>(gl, tile_size, tile_size, 32, params)
}
ChannelType::R8UI => {
Texture3D::create_empty::<R8UI>(gl, tile_size, tile_size, 32, params)
}
ChannelType::R16I => {
Texture3D::create_empty::<R16I>(gl, tile_size, tile_size, 32, params)
}
ChannelType::R32I => {
Texture3D::create_empty::<R32I>(gl, tile_size, tile_size, 32, params)
}
};
self.textures[block_idx] = Some(texture?);
self.textures[block_idx].as_ref().unwrap()
};
let slice_idx = slice & 0x1f;
// if there is already something, do not tex sub
if self.blocks[block_idx] & (1 << (31 - slice_idx)) == 0 {
image.insert_into_3d_texture(texture, &Vector3::<i32>::new(0, 0, slice_idx as i32))?;
match self.block_indices.binary_search(&block_idx) {
Ok(_) => {} // element already in vector @ `pos`
Err(i) => self.block_indices.insert(i, block_idx),
}
self.blocks[block_idx] |= 1 << (31 - slice_idx);
}
self.start_time = Some(Time::now());
@@ -413,29 +309,50 @@ impl HpxFreqTex {
}
// Cell must be contained in the texture
pub fn contains_slice(&self, offset: u16) -> bool {
let block_idx = (offset >> 5) as usize;
let slice_idx = offset & 0x1f;
pub fn contains_slice(&self, slice: u16) -> bool {
let block_idx = (slice >> 5) as usize;
let idx_in_block = slice & 0x1f;
(self.slice_idx[block_idx] >> (31 - slice_idx)) & 0x1 == 1
(self.blocks[block_idx] >> (31 - idx_in_block)) & 0x1 == 1
}
}
impl PartialOrd for HpxFreqTex {
impl HpxTile for HpxTexture3D {
// Getter
// Returns the current time if the texture is not full
fn start_time(&self) -> Time {
if let Some(t) = self.start_time {
t
} else {
Time::now()
}
}
fn time_request(&self) -> Time {
self.time_request
}
fn cell(&self) -> &HEALPixCell {
&self.tile_cell
}
}
use std::cmp::Ordering;
impl PartialOrd for HpxTexture3D {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for HpxFreqTex {
use crate::Abort;
impl Ord for HpxTexture3D {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap_abort()
}
}
impl PartialEq for HpxFreqTex {
impl PartialEq for HpxTexture3D {
fn eq(&self, other: &Self) -> bool {
self.cell == other.cell
self.uniq == other.uniq
}
}
impl Eq for HpxFreqTex {}
impl Eq for HpxTexture3D {}

View File

@@ -3,23 +3,23 @@ pub mod config;
pub mod d2;
pub mod d3;
pub mod raytracing;
pub mod tile_heap;
mod triangulation;
pub mod uv;
pub use d2::HiPS2D;
use crate::browser_support::BrowserFeaturesSupport;
use crate::downloader::request::allsky::Allsky;
use crate::renderable::HiPSConfig;
use crate::tile_fetcher::TileFetcherQueue;
use crate::time::Time;
use crate::CameraViewPort;
use crate::HEALPixCell;
use crate::HEALPixCoverage;
use crate::WebGlContext;
use al_api::hips::ImageExt;
use wasm_bindgen::JsValue;
mod subdivide;
/*
pub(crate) trait HpxTile {
// Getter
// Returns the current time if the texture is not full
@@ -28,11 +28,10 @@ pub(crate) trait HpxTile {
fn time_request(&self) -> Time;
fn cell(&self) -> &HEALPixCell;
}*/
}
pub(crate) trait HpxTileBuffer {
type T;
type C;
type T: HpxTile;
fn new(gl: &WebGlContext, config: HiPSConfig) -> Result<Self, JsValue>
where
@@ -44,14 +43,35 @@ pub(crate) trait HpxTileBuffer {
fn reset_available_tiles(&mut self) -> bool;
/// Accessors
fn get(&self, cell: &Self::C) -> Option<&Self::T>;
fn get(&self, cell: &HEALPixCell) -> Option<&Self::T>;
fn contains(&self, cell: &Self::C) -> bool;
fn contains(&self, cell: &HEALPixCell) -> bool;
// Get the nearest parent tile found in the CPU buffer
fn get_nearest_parent(&self, cell: &HEALPixCell) -> Option<HEALPixCell> {
/*if cell.is_root() {
// Root cells are in the buffer by definition
Some(*cell)
} else {*/
let mut parent_cell = cell.parent();
while !self.contains(&parent_cell) && !parent_cell.is_root() {
parent_cell = parent_cell.parent();
}
if self.contains(&parent_cell) {
Some(parent_cell)
} else {
None
}
//}
}
fn config_mut(&mut self) -> &mut HiPSConfig;
fn config(&self) -> &HiPSConfig;
}
use crate::downloader::query;
use crate::renderable::hips::HiPS::{D2, D3};
use crate::renderable::HiPS3D;
use crate::ProjectionType;
@@ -63,15 +83,10 @@ pub enum HiPS {
}
impl HiPS {
pub fn look_for_new_tiles(
&mut self,
tile_fetcher: &mut TileFetcherQueue,
camera: &CameraViewPort,
browser_features_support: &BrowserFeaturesSupport,
) {
pub fn look_for_new_tiles(&mut self, camera: &CameraViewPort) -> Option<Vec<HEALPixCell>> {
match self {
D2(hips) => hips.look_for_new_tiles(tile_fetcher, camera, browser_features_support),
D3(hips) => hips.look_for_new_tiles(tile_fetcher, camera, browser_features_support),
D2(hips) => hips.look_for_new_tiles(camera).map(|it| it.collect()),
D3(hips) => hips.look_for_new_tiles(camera).map(|it| it.collect()),
}
}
@@ -99,10 +114,10 @@ impl HiPS {
}
#[inline]
pub fn set_root_url(&mut self, root_url: String) {
pub fn get_config_mut(&mut self) -> &mut HiPSConfig {
match self {
D2(hips) => hips.get_config_mut().set_root_url(root_url),
D3(hips) => hips.get_config_mut().set_root_url(root_url),
D2(hips) => hips.get_config_mut(),
D3(hips) => hips.get_config_mut(),
}
}
@@ -113,43 +128,31 @@ impl HiPS {
}
}
#[inline]
pub fn set_moc(&mut self, moc: HEALPixCoverage) {
match self {
D2(hips) => hips.set_moc(moc),
D3(hips) => hips.set_moc(moc),
}
}
#[inline]
pub fn get_tile_query(&self, cell: &HEALPixCell) -> query::Tile {
match self {
HiPS::D2(hips) => hips.get_tile_query(cell),
HiPS::D3(hips) => hips.get_tile_query(cell),
}
}
#[inline]
pub fn add_allsky(&mut self, allsky: Allsky) -> Result<(), JsValue> {
match self {
HiPS::D2(hips) => hips.add_allsky(allsky),
HiPS::D3(hips) => hips.add_allsky(allsky),
}
}
pub fn is_allsky(&self) -> bool {
self.get_config().is_allsky
}
pub fn set_fits_params(&mut self, bscale: f32, bzero: f32, blank: Option<f32>) {
match self {
HiPS::D2(hips) => hips.set_fits_params(bscale, bzero, blank),
HiPS::D3(hips) => hips.set_fits_params(bscale, bzero, blank),
}
}
pub(crate) fn get_fits_params(&self) -> &Option<FitsParams> {
match self {
HiPS::D2(hips) => &hips.fits_params,
HiPS::D3(hips) => &hips.fits_params,
}
}
}
pub(crate) struct FitsParams {
pub bscale: f32,
pub bzero: f32,
pub blank: Option<f32>,
}
use al_core::shader::{SendUniforms, ShaderBound};
impl SendUniforms for FitsParams {
// Send only the allsky textures
fn attach_uniforms<'a>(&self, shader: &'a ShaderBound<'a>) -> &'a ShaderBound<'a> {
shader
.attach_uniform("scale", &self.bscale)
.attach_uniform("offset", &self.bzero);
if let Some(blank) = &self.blank {
shader.attach_uniform("blank", blank);
}
shader
}
}

View File

@@ -1,139 +0,0 @@
use crate::renderable::hips::d2::texture::HpxTex;
use crate::renderable::hips::d3::texture::HpxFreqTex;
use crate::time::Time;
use crate::Abort;
use crate::HEALPixCell;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
#[derive(Clone, Debug)]
pub struct Tile<C> {
cell: C,
time_request: Time,
}
impl<C> Tile<C> {
pub fn reset_time(&mut self) {
self.time_request = Time::now();
}
#[inline(always)]
pub fn cell(&self) -> &C {
&self.cell
}
}
impl Tile<HEALPixCell> {
pub fn is_root(&self) -> bool {
self.cell.is_root()
}
}
impl<C> PartialEq for Tile<C>
where
C: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
self.cell == other.cell
}
}
impl<C> Eq for Tile<C> where C: PartialEq {}
// Ordering based on the time the tile has been requested
impl<C> PartialOrd for Tile<C>
where
C: PartialEq,
{
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<C> Ord for Tile<C>
where
C: PartialEq,
{
fn cmp(&self, other: &Self) -> Ordering {
other
.time_request
.partial_cmp(&self.time_request)
.unwrap_abort()
}
}
impl From<&HpxTex> for Tile<HEALPixCell> {
fn from(tex: &HpxTex) -> Self {
let time_request = tex.time_request;
let cell = tex.cell;
Self { cell, time_request }
}
}
use crate::healpix::cell::HEALPixFreqCell;
impl From<&HpxFreqTex> for Tile<HEALPixFreqCell> {
fn from(tex: &HpxFreqTex) -> Self {
let time_request = tex.time_request;
let cell = tex.cell.clone();
Self { cell, time_request }
}
}
pub struct TileHeap<C> {
heap: BinaryHeap<Tile<C>>,
size: usize,
}
impl<C> TileHeap<C> {
pub fn clear(&mut self) {
self.heap.clear();
}
pub fn len(&self) -> usize {
self.heap.len()
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl<C> TileHeap<C>
where
C: PartialEq,
{
pub fn with_capacity(cap: usize) -> Self {
Self {
heap: BinaryHeap::with_capacity(cap),
size: cap,
}
}
// Check if the heap is full
pub fn is_full(&self) -> bool {
self.heap.len() >= self.size
}
pub fn update_entry<T: Into<Tile<C>>>(&mut self, item: T) {
let item = item.into();
self.heap = self
.heap
.drain()
// Remove the cell
.filter(|texture_node| texture_node.cell != item.cell)
// Collect to a new binary heap that does not have cell anymore
.collect::<BinaryHeap<_>>();
self.push(item);
}
pub fn push<T: Into<Tile<C>>>(&mut self, item: T) {
let item = item.into();
self.heap.push(item);
}
pub fn pop(&mut self) -> Option<Tile<C>> {
self.heap.pop()
}
}

View File

@@ -12,35 +12,26 @@ impl<T> Deref for UV<T> {
}
}
use super::d2::texture::HpxTexture2D;
use crate::healpix::cell::HEALPixCell;
use crate::renderable::hips::HpxTile;
pub struct TileUVW(pub [Vector3<f32>; 4]);
impl TileUVW {
// The texture cell passed must be a child of texture
pub fn new(cell: &HEALPixCell, parent_cell: &Option<HEALPixCell>, w: f32) -> TileUVW {
pub fn new(cell: &HEALPixCell, texture: &HpxTexture2D) -> TileUVW {
// Index of the texture in the total set of textures
//let texture_idx = texture.idx();
let texture_idx = texture.idx();
// Row and column indexes of the tile in its texture
let (u, v, ds) = if let Some(parent) = parent_cell {
let (idx_col_in_tex, idx_row_in_tex) = cell.offset_in_parent(parent);
let (idx_col_in_tex, idx_row_in_tex) = cell.offset_in_parent(texture.cell());
let nside = (1 << (cell.depth() - parent.depth())) as f32;
let ds = 1_f32 / nside;
let nside = (1 << (cell.depth() - texture.cell().depth())) as f32;
let u = (idx_row_in_tex as f32) / nside;
let v = (idx_col_in_tex as f32) / nside;
let u = (idx_row_in_tex as f32) / nside;
let v = (idx_col_in_tex as f32) / nside;
let ds = 1_f32 / nside;
(u, v, ds)
} else {
(0.0, 0.0, 1.0)
};
//let u = 0.0;
//let v = 0.0;
//let ds = 1.0;
// let w = texture_idx as f32;
let w = texture_idx as f32;
TileUVW([
Vector3::new(u, v, w),
Vector3::new(u + ds, v, w),

View File

@@ -1,11 +1,12 @@
use cgmath::Vector3;
use std::ops::RangeInclusive;
use wcs::ImgXY;
use crate::camera::CameraViewPort;
use crate::math::projection::ProjectionType;
use crate::renderable::utils::index_patch::CCWCheckPatchIndexIter;
use al_api::coo_system::CooSystem;
use fitsrs::wcs::{ImgXY, WCS};
use wcs::WCS;
pub fn get_grid_params(
xy_min: &(f64, f64),
@@ -93,6 +94,7 @@ fn get_coord_uv_it(
let x_it = std::iter::once((xmin, get_uv_in_tex_chunk(xmin)))
.chain(
tex_patch_x
.clone()
.skip(1)
.flat_map(|x1| vec![(x1, 1.0), (x1, 0.0)]),
)
@@ -172,7 +174,6 @@ pub fn vertices(
camera: &CameraViewPort,
wcs: &WCS,
projection: &ProjectionType,
rgba: bool,
) -> (Vec<f32>, Vec<f32>, Vec<u16>, Vec<u32>) {
let (x_it, y_it) = get_grid_params(
xy_min,
@@ -189,15 +190,7 @@ pub fn vertices(
let mut uv = vec![];
let pos = y_it
.flat_map(|(mut y, uvy)| {
// In FITS, the origin in lower left corner whereas in JPEG/PNG it is in upper left corner
// the WCS is aligned with the FITS convention so we must invert it for compressed RGBA images
y = if rgba {
wcs.img_dimensions()[1] as u64 - y
} else {
y
};
.flat_map(|(y, uvy)| {
x_it.clone().map(move |(x, uvx)| {
let ndc = if let Some(xyz) = wcs.unproj_xyz(&ImgXY::new(x as f64, y as f64)) {
let xyz = crate::coosys::apply_coo_system(
@@ -218,15 +211,14 @@ pub fn vertices(
})
.map(|(p, uu)| {
uv.extend_from_slice(&uu);
p
})
.collect::<Vec<_>>();
let mut indices = vec![];
let mut num_indices = vec![];
for idx_y_range in &idx_y_ranges {
for idx_x_range in &idx_x_ranges {
for idx_x_range in &idx_x_ranges {
for idx_y_range in &idx_y_ranges {
let build_indices_iter =
CCWCheckPatchIndexIter::new(idx_x_range, idx_y_range, num_x_vertices, &pos, camera);

View File

@@ -2,25 +2,28 @@ pub mod cuts;
pub mod grid;
pub mod subdivide_texture;
use al_core::texture::format::PixelType;
use al_core::texture::format::RGBA8U;
use al_core::texture::format::{R16I, R32F, R32I, R8U};
use al_core::convert::Cast;
use al_core::webgl_ctx::WebGlRenderingCtx;
use fitsrs::hdu::header::Bitpix;
use std::fmt::Debug;
use std::marker::Unpin;
use std::vec;
use al_api::coo_system::CooSystem;
use cgmath::Vector3;
use futures::stream::TryStreamExt;
use futures::AsyncRead;
use wasm_bindgen::JsValue;
use web_sys::WebGl2RenderingContext;
use fitsrs::wcs::{ImgXY, WCS};
use fitsrs::hdu::data::stream;
use wcs::{ImgXY, WCS};
use al_api::fov::CenteredFoV;
use al_api::hips::ImageMetadata;
use al_core::image::format::*;
use al_core::webgl_ctx::GlWrapper;
use al_core::VecData;
use al_core::WebGlContext;
@@ -33,9 +36,7 @@ use crate::ProjectionType;
use crate::ShaderManager;
use std::ops::Range;
use self::subdivide_texture::crop_image;
use self::subdivide_texture::ImagePatches;
type PixelItem<F> = <<F as ImageFormat>::P as Pixel>::Item;
pub struct Image {
/// A reference to the GL context
@@ -48,82 +49,179 @@ pub struct Image {
pos: Vec<f32>,
uv: Vec<f32>,
/// WCS allowing to locate the image on the sky
/// Parameters extracted from the fits
wcs: WCS,
/// Some parameters, only defined for image coming from FITS files
blank: Option<f32>,
bscale: f32,
bzero: f32,
scale: f32,
offset: f32,
cuts: Range<f32>,
/// The center of the fits
centered_fov: CenteredFoV,
//+ Texture format
pixel_type: PixelType,
channel: ChannelType,
/// Texture chunks objects
textures: Vec<Texture2D>,
/// Texture indices that must be drawn
idx_tex: Vec<usize>,
/// The size of a textured image patch
/// that can be uploaded to the GPU
w_patch: usize,
h_patch: usize,
/// The maximum webgl supported texture size
max_tex_size_x: usize,
max_tex_size_y: usize,
reg: Region,
// The coo system in which the polygonal region has been defined
coo_sys: CooSystem,
}
use al_core::pixel::Pixel;
use fitsrs::hdu::header::extension;
use fitsrs::hdu::AsyncHDU;
use futures::io::BufReader;
use futures::AsyncReadExt;
const TEX_PARAMS: &[(u32, u32)] = &[
(
WebGlRenderingCtx::TEXTURE_MIN_FILTER,
WebGlRenderingCtx::NEAREST_MIPMAP_NEAREST,
),
(
WebGlRenderingCtx::TEXTURE_MAG_FILTER,
WebGlRenderingCtx::NEAREST,
),
// Prevents s-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_S,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
// Prevents t-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_T,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
];
impl Image {
#[allow(clippy::too_many_arguments)]
fn init_buffers(
gl: WebGlContext,
patches: ImagePatches,
pub async fn from_reader_and_wcs<R, F>(
gl: &WebGlContext,
mut reader: R,
wcs: WCS,
bscale: f32,
bzero: f32,
scale: Option<f32>,
offset: Option<f32>,
blank: Option<f32>,
// Coo sys of the view
coo_sys: CooSystem,
) -> Result<Self, JsValue> {
let dim = wcs.img_dimensions();
let (width, height) = (dim[0] as u64, dim[1] as u64);
) -> Result<Self, JsValue>
where
F: ImageFormat,
R: AsyncReadExt + Unpin,
{
let (width, height) = wcs.img_dimensions();
let ImagePatches {
pixel_type,
texture_patches: textures,
initial_cuts: cuts,
w_patch,
h_patch,
} = patches;
let max_tex_size =
WebGl2RenderingContext::get_parameter(gl, WebGl2RenderingContext::MAX_TEXTURE_SIZE)?
.as_f64()
.unwrap_or(4096.0) as usize;
let mut max_tex_size_x = max_tex_size;
let mut max_tex_size_y = max_tex_size;
// apply bscale to the cuts
let offset = offset.unwrap_or(0.0);
let scale = scale.unwrap_or(1.0);
let (textures, cuts) = if width <= max_tex_size as u64 && height <= max_tex_size as u64 {
max_tex_size_x = width as usize;
max_tex_size_y = height as usize;
// can fit in one texture
let num_pixels_to_read = (width as usize) * (height as usize);
let num_bytes_to_read = num_pixels_to_read * std::mem::size_of::<F::P>();
let mut buf = vec![0; num_bytes_to_read];
reader
.read_exact(&mut buf[..num_bytes_to_read])
.await
.map_err(|e| JsValue::from_str(&format!("{:?}", e)))?;
// bytes aligned
unsafe {
let data = std::slice::from_raw_parts_mut(
buf[..].as_mut_ptr() as *mut PixelItem<F>,
num_pixels_to_read * F::NUM_CHANNELS,
);
let texture = Texture2D::create_from_raw_pixels::<F>(
gl,
width as i32,
height as i32,
&[
(
WebGlRenderingCtx::TEXTURE_MIN_FILTER,
WebGlRenderingCtx::NEAREST_MIPMAP_NEAREST,
),
(
WebGlRenderingCtx::TEXTURE_MAG_FILTER,
WebGlRenderingCtx::NEAREST,
),
// Prevents s-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_S,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
// Prevents t-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_T,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
],
Some(data),
)?;
let cuts = match F::CHANNEL_TYPE {
ChannelType::R32F | ChannelType::R64F => {
let pixels =
std::slice::from_raw_parts(data.as_ptr() as *const f32, data.len() / 4);
let mut sub_pixels = pixels
.iter()
.step_by(100)
.filter(|pixel| (*pixel).is_finite())
.cloned()
.collect::<Vec<_>>();
cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
}
ChannelType::R8UI | ChannelType::R16I | ChannelType::R32I => {
// BLANK is only valid for those channels/BITPIX (> 0)
if let Some(blank) = blank {
let mut sub_pixels = data
.iter()
.step_by(100)
.filter_map(|pixel| {
let pixel = <PixelItem<F> as Cast<f32>>::cast(*pixel);
if pixel != blank {
Some(pixel)
} else {
None
}
})
.collect::<Vec<_>>();
cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
} else {
// No blank value => we consider all the values
let mut sub_pixels = data
.iter()
.step_by(100)
.map(|pixel| <PixelItem<F> as Cast<f32>>::cast(*pixel))
.collect::<Vec<_>>();
cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
}
}
// RGB(A) images
_ => 0.0..1.0,
};
(vec![texture], cuts)
}
} else {
subdivide_texture::crop_image::<F, R>(
gl,
width,
height,
reader,
max_tex_size as u64,
blank,
)
.await?
};
for tex in &textures {
tex.generate_mipmap();
}
let start = cuts.start * bscale + bzero;
let end = cuts.end * bscale + bzero;
let start = cuts.start * scale + offset;
let end = cuts.end * scale + offset;
let cuts = start..end;
@@ -133,7 +231,7 @@ impl Image {
let uv = vec![];
// Define the buffers
let vao = {
let mut vao = VertexArrayObject::new(&gl);
let mut vao = VertexArrayObject::new(gl);
#[cfg(feature = "webgl2")]
vao.bind_for_update()
@@ -159,6 +257,7 @@ impl Image {
vao
};
let gl = gl.clone();
// Compute the fov
let center = wcs
@@ -205,7 +304,7 @@ impl Image {
let idx_tex = (0..textures.len()).collect();
Ok(Self {
Ok(Image {
gl,
// The positions
@@ -218,19 +317,19 @@ impl Image {
// Metadata extracted from the fits
wcs,
// CooSystem of the wcs, this should belong to the WCS
bscale,
bzero,
scale,
offset,
blank,
// Centered field of view allowing to locate the fits
centered_fov,
// Texture parameters
pixel_type,
channel: F::CHANNEL_TYPE,
textures,
cuts,
w_patch,
h_patch,
max_tex_size_x,
max_tex_size_y,
// Indices of textures that must be drawn
idx_tex,
// The polygonal region in the sky
@@ -240,348 +339,127 @@ impl Image {
})
}
#[allow(clippy::too_many_arguments)]
pub fn from_fits_hdu(
gl: &WebGlContext,
// wcs extracted from the image HDU
wcs: fitsrs::WCS,
// bitpix extracted from the image HDU
bitpix: fitsrs::hdu::header::Bitpix,
// bytes slice extracted from the HDU
bytes: &[u8],
// other keywords extracted from the header of the image HDU
bscale: f32,
bzero: f32,
blank: Option<f32>,
// Coo sys of the view
coo_sys: CooSystem,
) -> Result<Self, JsValue> {
let dim = wcs.img_dimensions();
let (width, height) = (dim[0] as u64, dim[1] as u64);
let max_tex_size =
WebGl2RenderingContext::get_parameter(gl, WebGl2RenderingContext::MAX_TEXTURE_SIZE)?
.as_f64()
.unwrap_or(4096.0) as usize;
let patches = if width <= max_tex_size as u64 && height <= max_tex_size as u64 {
// can fit in one texture
// bytes aligned
match bitpix {
Bitpix::I64 => {
// one must convert the data to i32
let bytes_from_i32 = bytes
.chunks(8)
.flat_map(|bytes| {
let l = i64::from_be_bytes([
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5],
bytes[6], bytes[7],
]);
let i = l as i32;
i32::to_be_bytes(i)
})
.collect::<Vec<_>>();
let texture = Texture2D::create_from_raw_bytes::<R32I>(
gl,
width as i32,
height as i32,
TEX_PARAMS,
bytes_from_i32.as_slice(),
)?;
let mut sub_pixels = bytes_from_i32
.chunks(std::mem::size_of::<i32>())
.step_by(100)
.filter_map(|p| {
let p = i32::from_be_bytes([p[0], p[1], p[2], p[3]]) as f32;
if let Some(blank) = blank {
if p != blank {
Some(p)
} else {
None
}
} else {
Some(p)
}
})
.collect::<Vec<_>>();
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
ImagePatches::new(
PixelType::R32I,
vec![texture],
cuts,
width as usize,
height as usize,
)
}
Bitpix::F64 => {
// one must convert the data to f32
let bytes_from_f32 = bytes
.chunks(8)
.flat_map(|bytes| {
let d = f64::from_be_bytes([
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5],
bytes[6], bytes[7],
]);
let f = d as f32;
f32::to_be_bytes(f)
})
.collect::<Vec<_>>();
let texture = Texture2D::create_from_raw_bytes::<R32F>(
gl,
width as i32,
height as i32,
TEX_PARAMS,
bytes_from_f32.as_slice(),
)?;
let mut sub_pixels = bytes_from_f32
.chunks(std::mem::size_of::<f32>())
.step_by(100)
.filter_map(|p| {
let p = f32::from_be_bytes([p[0], p[1], p[2], p[3]]);
if p.is_finite() {
Some(p)
} else {
None
}
})
.collect::<Vec<_>>();
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
ImagePatches::new(
PixelType::R32F,
vec![texture],
cuts,
width as usize,
height as usize,
)
}
Bitpix::U8 => {
let texture = Texture2D::create_from_raw_bytes::<R8U>(
gl,
width as i32,
height as i32,
TEX_PARAMS,
bytes,
)?;
let mut sub_pixels = bytes
.iter()
.step_by(100)
.filter_map(|p| {
let p = *p as f32;
if let Some(blank) = blank {
if p != blank {
Some(p)
} else {
None
}
} else {
Some(p)
}
})
.collect::<Vec<_>>();
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
ImagePatches::new(
PixelType::R8U,
vec![texture],
cuts,
width as usize,
height as usize,
)
}
Bitpix::I16 => {
let texture = Texture2D::create_from_raw_bytes::<R16I>(
gl,
width as i32,
height as i32,
TEX_PARAMS,
bytes,
)?;
let mut sub_pixels = bytes
.chunks(2)
.step_by(100)
.filter_map(|p| {
let p = i16::from_be_bytes([p[0], p[1]]) as f32;
if let Some(blank) = blank {
if p != blank {
Some(p)
} else {
None
}
} else {
Some(p)
}
})
.collect::<Vec<_>>();
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
ImagePatches::new(
PixelType::R16I,
vec![texture],
cuts,
width as usize,
height as usize,
)
}
Bitpix::I32 => {
let texture = Texture2D::create_from_raw_bytes::<R32I>(
gl,
width as i32,
height as i32,
TEX_PARAMS,
bytes,
)?;
let mut sub_pixels = bytes
.chunks(4)
.step_by(100)
.filter_map(|p| {
let p = i32::from_be_bytes([p[0], p[1], p[2], p[3]]) as f32;
if let Some(blank) = blank {
if p != blank {
Some(p)
} else {
None
}
} else {
Some(p)
}
})
.collect::<Vec<_>>();
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
ImagePatches::new(
PixelType::R32I,
vec![texture],
cuts,
width as usize,
height as usize,
)
}
Bitpix::F32 => {
let texture = Texture2D::create_from_raw_bytes::<R32F>(
gl,
width as i32,
height as i32,
TEX_PARAMS,
bytes,
)?;
let mut sub_pixels = bytes
.chunks(std::mem::size_of::<f32>())
.step_by(100)
.filter_map(|p| {
let p = f32::from_be_bytes([p[0], p[1], p[2], p[3]]);
if p.is_finite() {
Some(p)
} else {
None
}
})
.collect::<Vec<_>>();
let cuts = cuts::first_and_last_percent(&mut sub_pixels, 1, 99);
ImagePatches::new(
PixelType::R32F,
vec![texture],
cuts,
width as usize,
height as usize,
)
}
}
} else {
// We cut the image in 4096x4096 patches. It is already 64MB to allocate for a f32 image of this dimensions.
match bitpix {
Bitpix::U8 => crop_image::<R8U>(gl, width, height, bytes, 4096, blank)?,
Bitpix::I16 => crop_image::<R16I>(gl, width, height, bytes, 4096, blank)?,
Bitpix::I32 => crop_image::<R32I>(gl, width, height, bytes, 4096, blank)?,
Bitpix::F32 => crop_image::<R32F>(gl, width, height, bytes, 4096, blank)?,
Bitpix::F64 => {
let bytes_from_f32 = bytes
.chunks(8)
.flat_map(|bytes| {
let d = f64::from_be_bytes([
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5],
bytes[6], bytes[7],
]);
let f = d as f32;
f32::to_be_bytes(f)
})
.collect::<Vec<_>>();
crop_image::<R32F>(gl, width, height, &bytes_from_f32, 4096, blank)?
}
_ => {
return Err(JsValue::from_str(
"I64/F64 for big fits images not supported.",
))
}
}
};
Self::init_buffers(gl.clone(), patches, wcs, bscale, bzero, blank, coo_sys)
pub fn get_cuts(&self) -> &Range<f32> {
&self.cuts
}
pub fn from_rgba_bytes(
pub async fn from_fits_hdu_async<'a, R>(
gl: &WebGlContext,
// bytes in TextureFormat
bytes: &[u8],
// wcs extracted from the image HDU
wcs: fitsrs::WCS,
// Coo sys of the view
hdu: &mut AsyncHDU<'a, BufReader<R>, extension::image::Image>,
coo_sys: CooSystem,
) -> Result<Self, JsValue> {
let dim = wcs.img_dimensions();
let (width, height) = (dim[0] as u64, dim[1] as u64);
) -> Result<Self, JsValue>
where
R: AsyncRead + Unpin + Debug + 'a,
{
// Load the FITS file
let header = hdu.get_header();
let max_tex_size =
WebGl2RenderingContext::get_parameter(gl, WebGl2RenderingContext::MAX_TEXTURE_SIZE)?
.as_f64()
.unwrap_or(4096.0) as usize;
let scale = header.get_parsed::<f64>(b"BSCALE ").map(|v| v.unwrap());
let offset = header.get_parsed::<f64>(b"BZERO ").map(|v| v.unwrap());
let blank = header.get_parsed::<f64>(b"BLANK ").map(|v| v.unwrap());
let bscale = 1.0;
let bzero = 0.0;
let blank = None;
// Create a WCS from a specific header unit
let wcs = WCS::from_fits_header(header)
.map_err(|e| JsValue::from_str(&format!("WCS parsing error: reason: {}", e)))?;
let image_patches = if width <= max_tex_size as u64 && height <= max_tex_size as u64 {
// small image case, can fit into a webgl texture
let textures = vec![Texture2D::create_from_raw_bytes::<RGBA8U>(
gl,
width as i32,
height as i32,
TEX_PARAMS,
bytes,
)?];
let pixel_ty = PixelType::RGBA8U;
let cuts = 0.0..1.0;
let data = hdu.get_data_mut();
ImagePatches::new(pixel_ty, textures, cuts, width as usize, height as usize)
} else {
crop_image::<RGBA8U>(gl, width, height, bytes, 4096, None)?
};
match data {
stream::Data::U8(data) => {
let reader = data.map_ok(|v| v[0].to_le_bytes()).into_async_read();
Self::init_buffers(
gl.clone(),
image_patches,
wcs,
bscale,
bzero,
blank,
coo_sys,
)
Self::from_reader_and_wcs::<_, R8UI>(
gl,
reader,
wcs,
scale.map(|v| v as f32),
offset.map(|v| v as f32),
blank.map(|v| v as f32),
coo_sys,
)
.await
}
stream::Data::I16(data) => {
let reader = data.map_ok(|v| v[0].to_le_bytes()).into_async_read();
Self::from_reader_and_wcs::<_, R16I>(
gl,
reader,
wcs,
scale.map(|v| v as f32),
offset.map(|v| v as f32),
blank.map(|v| v as f32),
coo_sys,
)
.await
}
stream::Data::I32(data) => {
let reader = data.map_ok(|v| v[0].to_le_bytes()).into_async_read();
Self::from_reader_and_wcs::<_, R32I>(
gl,
reader,
wcs,
scale.map(|v| v as f32),
offset.map(|v| v as f32),
blank.map(|v| v as f32),
coo_sys,
)
.await
}
stream::Data::I64(data) => {
let reader = data
.map_ok(|v| {
let v = v[0] as i32;
v.to_le_bytes()
})
.into_async_read();
Self::from_reader_and_wcs::<_, R32I>(
gl,
reader,
wcs,
scale.map(|v| v as f32),
offset.map(|v| v as f32),
blank.map(|v| v as f32),
coo_sys,
)
.await
}
stream::Data::F32(data) => {
let reader = data.map_ok(|v| v[0].to_le_bytes()).into_async_read();
Self::from_reader_and_wcs::<_, R32F>(
gl,
reader,
wcs,
scale.map(|v| v as f32),
offset.map(|v| v as f32),
blank.map(|v| v as f32),
coo_sys,
)
.await
}
stream::Data::F64(data) => {
let reader = data
.map_ok(|v| {
let v = v[0] as f32;
v.to_le_bytes()
})
.into_async_read();
Self::from_reader_and_wcs::<_, R32F>(
gl,
reader,
wcs,
scale.map(|v| v as f32),
offset.map(|v| v as f32),
blank.map(|v| v as f32),
coo_sys,
)
.await
}
}
}
pub fn recompute_vertices(
@@ -589,8 +467,9 @@ impl Image {
camera: &CameraViewPort,
projection: &ProjectionType,
) -> Result<(), JsValue> {
let dim = self.wcs.img_dimensions();
let (width, height) = (dim[0] as f64, dim[1] as f64);
let (width, height) = self.wcs.img_dimensions();
let width = width as f64;
let height = height as f64;
let (x_mesh_range, y_mesh_range) =
if camera.get_field_of_view().intersects_region(&self.reg) {
@@ -612,13 +491,12 @@ impl Image {
let (pos, uv, indices, num_indices) = grid::vertices(
&(x_mesh_range.start, y_mesh_range.start),
&(x_mesh_range.end.ceil(), y_mesh_range.end.ceil()),
self.w_patch as u64,
self.h_patch as u64,
self.max_tex_size_x as u64,
self.max_tex_size_y as u64,
num_vertices,
camera,
&self.wcs,
projection,
self.pixel_type == PixelType::RGB8U || self.pixel_type == PixelType::RGBA8U,
);
self.pos = pos;
@@ -661,8 +539,7 @@ impl Image {
if self.coo_sys != camera.get_coo_system() {
self.coo_sys = camera.get_coo_system();
let dim = self.wcs.img_dimensions();
let (width, height) = (dim[0] as usize, dim[1] as usize);
let (width, height) = self.wcs.img_dimensions();
// the camera coo system is not sync with the one in which the region
// has been defined
@@ -722,50 +599,59 @@ impl Image {
..
} = cfg;
let shader = match self.pixel_type {
PixelType::RGBA8U => crate::shader::get_shader(
let shader = match self.channel {
ChannelType::RGBA8U => crate::shader::get_shader(
&self.gl,
shaders,
"image_base.vert",
"image_sampler.frag",
)?,
PixelType::RGB8U => crate::shader::get_shader(
ChannelType::R32F => {
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_sampler.frag")?
}
#[cfg(feature = "webgl2")]
ChannelType::R32I => crate::shader::get_shader(
&self.gl,
shaders,
"image_base.vert",
"image_sampler.frag",
"fits_base.vert",
"fits_isampler.frag",
)?,
PixelType::R32F => {
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_f32.frag")?
}
PixelType::R32I => {
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_i32.frag")?
}
PixelType::R16I => {
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_i16.frag")?
}
PixelType::R8U => {
crate::shader::get_shader(&self.gl, shaders, "fits_base.vert", "fits_u8.frag")?
}
#[cfg(feature = "webgl2")]
ChannelType::R16I => crate::shader::get_shader(
&self.gl,
shaders,
"fits_base.vert",
"fits_isampler.frag",
)?,
#[cfg(feature = "webgl2")]
ChannelType::R8UI => crate::shader::get_shader(
&self.gl,
shaders,
"fits_base.vert",
"fits_usampler.frag",
)?,
_ => return Err(JsValue::from_str("Image format type not supported")),
};
//self.gl.disable(WebGl2RenderingContext::CULL_FACE);
// 2. Draw it if its opacity is not null
blend_cfg.enable(&self.gl, || {
let mut off_indices = 0;
for &idx_tex in self.idx_tex.iter() {
for (idx, &idx_tex) in self.idx_tex.iter().enumerate() {
let texture = &self.textures[idx_tex];
let num_indices = self.num_indices[idx_tex] as i32;
let num_indices = self.num_indices[idx] as i32;
let shader_bound = shader.bind(&self.gl);
shader_bound
.attach_uniforms_from(colormaps)
.attach_uniforms_with_params_from(color, colormaps)
.attach_uniform("opacity", opacity)
.attach_uniform("tex", texture)
.attach_uniform("scale", &self.bscale)
.attach_uniform("offset", &self.bzero);
.attach_uniform("scale", &self.scale)
.attach_uniform("offset", &self.offset);
if let Some(blank) = self.blank {
shader_bound.attach_uniform("blank", &blank);
@@ -797,9 +683,4 @@ impl Image {
pub fn get_centered_fov(&self) -> &CenteredFoV {
&self.centered_fov
}
#[inline]
pub fn get_cuts(&self) -> &Range<f32> {
&self.cuts
}
}

View File

@@ -1,238 +1,194 @@
use al_core::texture::format::PixelType;
use al_core::image::format::ChannelType;
use wasm_bindgen::JsValue;
use futures::AsyncReadExt;
use super::cuts;
use al_core::texture::format::TextureFormat;
use al_core::image::format::ImageFormat;
use al_core::texture::pixel::Pixel;
use al_core::webgl_ctx::WebGlRenderingCtx;
use al_core::Texture2D;
use al_core::WebGlContext;
use std::ops::Range;
pub fn crop_image<F>(
use al_core::convert::Cast;
type PixelItem<F> = <<F as ImageFormat>::P as Pixel>::Item;
pub async fn crop_image<F, R>(
gl: &WebGlContext,
width: u64,
height: u64,
bytes: &[u8],
mut reader: R,
max_tex_size: u64,
blank: Option<f32>,
) -> Result<ImagePatches, JsValue>
) -> Result<(Vec<Texture2D>, Range<f32>), JsValue>
where
F: TextureFormat,
F: ImageFormat,
R: AsyncReadExt + Unpin,
{
let mut tex_chunks = vec![];
let num_texture_x = ((width / max_tex_size) + 1) as usize;
let num_texture_y = ((height / max_tex_size) + 1) as usize;
// Subdivision
let num_textures = ((width / max_tex_size) + 1) * ((height / max_tex_size) + 1);
let mut w = Vec::with_capacity(num_texture_x);
let mut h = Vec::with_capacity(num_texture_x);
let mut buf = vec![
0;
(max_tex_size as usize)
* std::mem::size_of::<<F::P as Pixel>::Item>()
* F::NUM_CHANNELS
];
for i in 0..num_texture_x {
let w_patch = if i == num_texture_x - 1 {
width % max_tex_size
for _ in 0..num_textures {
let tex_chunk = Texture2D::create_empty_with_format::<F>(
gl,
max_tex_size as i32,
max_tex_size as i32,
&[
(
WebGlRenderingCtx::TEXTURE_MIN_FILTER,
WebGlRenderingCtx::NEAREST_MIPMAP_NEAREST,
),
(
WebGlRenderingCtx::TEXTURE_MAG_FILTER,
WebGlRenderingCtx::NEAREST,
),
// Prevents s-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_S,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
// Prevents t-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_T,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
],
)?;
tex_chunk.generate_mipmap();
tex_chunks.push(tex_chunk);
}
let mut pixels_written = 0;
let num_pixels = width * height;
const PIXEL_STEP: u64 = 256;
let step_x_cut = (width / PIXEL_STEP) as usize;
let step_y_cut = (height / PIXEL_STEP) as usize;
let mut sub_pixels = vec![];
let step_cut = step_x_cut.max(step_y_cut) + 1;
let num_texture_x = (width / max_tex_size) + 1;
let num_texture_y = (height / max_tex_size) + 1;
while pixels_written < num_pixels {
// Get the id of the texture to fill
let id_tx = (pixels_written % width) / max_tex_size;
let id_ty = (pixels_written / width) / max_tex_size;
let id_t = id_ty + id_tx * num_texture_y;
// For textures along the right-x border
let num_pixels_to_read = if id_tx == num_texture_x - 1 {
width - (pixels_written % width)
} else {
max_tex_size
};
let h_patch = max_tex_size;
w.push(w_patch as usize);
h.push(h_patch as usize);
}
let num_bytes_to_read = (num_pixels_to_read as usize)
* std::mem::size_of::<<F::P as Pixel>::Item>()
* F::NUM_CHANNELS;
let create_next_patches = |num_patches_per_row: usize| -> Vec<Vec<u8>> {
(0..num_patches_per_row)
.map(|_| {
vec![0_u8; (max_tex_size as usize) * (max_tex_size as usize) * F::NUM_CHANNELS]
})
.collect::<Vec<_>>()
};
if let Ok(()) = reader.read_exact(&mut buf[..num_bytes_to_read]).await {
// Tell where the data must go inside the texture
let off_y_px = id_ty * max_tex_size;
let mut buf = create_next_patches(num_texture_x);
let dy = (pixels_written / width) - off_y_px;
let view = unsafe {
let data = std::slice::from_raw_parts(
buf[..num_bytes_to_read].as_ptr() as *const <F::P as Pixel>::Item,
(num_pixels_to_read as usize) * F::NUM_CHANNELS,
);
let mut pixels_written = 0_usize;
let num_pixels = (width * height) as usize;
// compute the cuts if the pixel is grayscale
if (pixels_written / width) % (step_cut as u64) == 0 {
// We are in a good line
let xmin = pixels_written % width;
// Sampled pixels for computing automatic min/max cut values
const PIXEL_STEP: usize = 256;
let mut sub_pixels = vec![];
match F::CHANNEL_TYPE {
ChannelType::R32F | ChannelType::R64F => {
let pixels = std::slice::from_raw_parts(
data.as_ptr() as *const f32,
data.len() / 4,
);
let step_x_cut = (width as usize) / PIXEL_STEP;
let step_y_cut = (height as usize) / PIXEL_STEP;
let step_cut = step_x_cut.max(step_y_cut) + 1_usize;
for i in (0..width).step_by(step_cut) {
if (xmin..(xmin + num_pixels_to_read)).contains(&i) {
let j = (i - xmin) as usize;
let mut id_tx = 0;
let mut id_ty = 0;
while pixels_written < num_pixels {
let bytes_written = pixels_written * F::NUM_CHANNELS;
// For textures along the right-x border
let w_patch = w[id_tx];
let h_patch = h[id_tx];
let num_pixels_to_read = w_patch;
let num_bytes_to_read = num_pixels_to_read * F::NUM_CHANNELS;
// Tell where the data must go inside the texture
let off_y_px = id_ty * h_patch;
// line index
let y = pixels_written / (width as usize);
let dy = y - off_y_px;
let off_bytes_src = bytes_written;
let off_bytes_dst = dy * (max_tex_size as usize) * F::NUM_CHANNELS;
buf[id_tx][off_bytes_dst..(off_bytes_dst + num_bytes_to_read)]
.copy_from_slice(&bytes[off_bytes_src..(off_bytes_src + num_bytes_to_read)]);
pixels_written += num_pixels_to_read;
if F::PIXEL_TYPE.num_channels() == 1 && y % step_cut == 0 {
// on a good line
let bytes_line = &buf[id_tx][off_bytes_dst..(off_bytes_dst + num_bytes_to_read)];
for x_in_patch in (0..w_patch).step_by(step_cut) {
let x_byte_off = x_in_patch * F::NUM_CHANNELS;
let p = &bytes_line[x_byte_off..(x_byte_off + F::NUM_CHANNELS)];
let v = match F::PIXEL_TYPE {
PixelType::R8U => {
let p = p[0] as f32;
if let Some(blank) = blank {
if p != blank {
Some(p)
} else {
None
if pixels[j].is_finite() {
sub_pixels.push(pixels[j]);
}
}
}
} else {
Some(p)
}
}
PixelType::R16I => {
let p = i16::from_be_bytes([p[0], p[1]]) as f32;
ChannelType::R8UI | ChannelType::R16I | ChannelType::R32I => {
if let Some(blank) = blank {
for i in (0..width).step_by(step_cut) {
if (xmin..(xmin + num_pixels_to_read)).contains(&i) {
let j = (i - xmin) as usize;
if let Some(blank) = blank {
if p != blank {
Some(p)
let pixel = <PixelItem<F> as Cast<f32>>::cast(data[j]);
if pixel != blank {
sub_pixels.push(pixel);
}
}
}
} else {
None
for i in (0..width).step_by(step_cut) {
if (xmin..(xmin + num_pixels_to_read)).contains(&i) {
let j = (i - xmin) as usize;
let pixel = <PixelItem<F> as Cast<f32>>::cast(data[j]);
sub_pixels.push(pixel);
}
}
}
} else {
Some(p)
}
// colored pixels
_ => (),
}
PixelType::R32I => {
let p = i32::from_be_bytes([p[0], p[1], p[2], p[3]]) as f32;
if let Some(blank) = blank {
if p != blank {
Some(p)
} else {
None
}
} else {
Some(p)
}
}
PixelType::R32F => {
let p = f32::from_be_bytes([p[0], p[1], p[2], p[3]]);
if p.is_finite() {
Some(p)
} else {
None
}
}
_ => unreachable!(),
};
if let Some(v) = v {
sub_pixels.push(v);
}
}
F::view(data)
};
tex_chunks[id_t as usize]
.bind()
.tex_sub_image_2d_with_i32_and_i32_and_u32_and_type_and_opt_array_buffer_view(
0,
dy as i32,
num_pixels_to_read as i32,
1,
Some(view.as_ref()),
);
pixels_written += num_pixels_to_read;
} else {
return Err(JsValue::from_str(
"invalid data with respect to the NAXIS given in the WCS",
));
}
if (((dy + 1) % (max_tex_size as usize) == 0) && id_tx == buf.len() - 1)
|| pixels_written >= num_pixels
{
// we can create new textures of size max_tex_size
for patch_buf in &buf {
let tex_chunk = Texture2D::create_from_raw_bytes::<F>(
gl,
max_tex_size as i32,
max_tex_size as i32,
&[
(
WebGlRenderingCtx::TEXTURE_MIN_FILTER,
WebGlRenderingCtx::NEAREST_MIPMAP_NEAREST,
),
(
WebGlRenderingCtx::TEXTURE_MAG_FILTER,
WebGlRenderingCtx::NEAREST,
),
// Prevents s-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_S,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
// Prevents t-coordinate wrapping (repeating)
(
WebGlRenderingCtx::TEXTURE_WRAP_T,
WebGlRenderingCtx::CLAMP_TO_EDGE,
),
],
patch_buf,
)?;
tex_chunks.push(tex_chunk);
}
//buf.clear();
//buf = create_next_patches(num_texture_x);
id_ty = (id_ty + 1) % num_texture_y;
}
id_tx = (id_tx + 1) % num_texture_x;
}
let cuts = if F::PIXEL_TYPE.num_channels() == 1 {
let cuts = if F::CHANNEL_TYPE.is_colored() {
cuts::first_and_last_percent(&mut sub_pixels, 1, 99)
} else {
0.0..1.0
};
Ok(ImagePatches {
pixel_type: F::PIXEL_TYPE,
texture_patches: tex_chunks,
initial_cuts: cuts,
w_patch: max_tex_size as usize,
h_patch: max_tex_size as usize,
})
}
pub struct ImagePatches {
pub pixel_type: PixelType,
pub texture_patches: Vec<Texture2D>,
pub initial_cuts: Range<f32>,
pub w_patch: usize,
pub h_patch: usize,
}
impl ImagePatches {
pub fn new(
pixel_type: PixelType,
texture_patches: Vec<Texture2D>,
initial_cuts: Range<f32>,
w_patch: usize,
h_patch: usize,
) -> Self {
Self {
pixel_type,
texture_patches,
initial_cuts,
w_patch,
h_patch,
}
}
Ok((tex_chunks, cuts))
}

View File

@@ -1,5 +1,5 @@
use super::MOC;
use crate::{camera::CameraViewPort, SpaceMoc};
use crate::{camera::CameraViewPort, HEALPixCoverage};
use al_api::moc::MOCOptions;
pub struct MOCHierarchy {
@@ -12,13 +12,19 @@ use al_core::WebGlContext;
impl MOCHierarchy {
pub fn from_full_res_moc(
gl: WebGlContext,
full_res_moc: SpaceMoc,
full_res_moc: HEALPixCoverage,
options: &MOCOptions,
) -> Self {
let full_res_depth = full_res_moc.depth();
let mut mocs: Vec<_> = (0..full_res_depth)
.map(|d| MOC::new(gl.clone(), SpaceMoc(full_res_moc.degraded(d)), options))
.map(|d| {
MOC::new(
gl.clone(),
HEALPixCoverage(full_res_moc.degraded(d)),
options,
)
})
.collect();
mocs.push(MOC::new(gl.clone(), full_res_moc, options));
@@ -74,7 +80,7 @@ impl MOCHierarchy {
&mut self.mocs[d]
}
pub fn get_full_moc(&self) -> &SpaceMoc {
pub fn get_full_moc(&self) -> &HEALPixCoverage {
&self.mocs.last().unwrap().moc
}

View File

@@ -3,7 +3,7 @@ pub mod renderer;
pub use renderer::MOCRenderer;
use crate::camera::CameraViewPort;
use crate::healpix::moc::SpaceMoc;
use crate::healpix::coverage::HEALPixCoverage;
use crate::math::projection::ProjectionType;
use crate::renderable::WebGl2RenderingContext;
use crate::shader::ShaderManager;
@@ -32,11 +32,11 @@ pub struct MOC {
inner: [Option<MOCIntern>; 3],
pub moc: SpaceMoc,
pub moc: HEALPixCoverage,
}
impl MOC {
pub(super) fn new(gl: WebGlContext, moc: SpaceMoc, cfg: &MOCOptions) -> Self {
pub(super) fn new(gl: WebGlContext, moc: HEALPixCoverage, cfg: &MOCOptions) -> Self {
let sky_fraction = moc.sky_fraction() as f32;
let max_order = moc.depth_max();
@@ -228,7 +228,7 @@ impl MOCIntern {
fn vertices_in_view<'a>(
&self,
moc: &'a SpaceMoc,
moc: &'a HEALPixCoverage,
camera: &'a mut CameraViewPort,
) -> impl Iterator<Item = [(f64, f64); 4]> + 'a {
let view_moc = camera.get_cov(CooSystem::ICRS);
@@ -250,7 +250,7 @@ impl MOCIntern {
fn draw(
&mut self,
moc: &SpaceMoc,
moc: &HEALPixCoverage,
camera: &mut CameraViewPort,
proj: &ProjectionType,
shaders: &mut ShaderManager,
@@ -457,7 +457,7 @@ impl MOCIntern {
fn compute_edge_paths_iter<'a>(
&self,
moc: &'a SpaceMoc,
moc: &'a HEALPixCoverage,
camera: &'a mut CameraViewPort,
) -> impl Iterator<Item = f32> + 'a {
self.vertices_in_view(moc, camera).flat_map(|v| {

View File

@@ -1,4 +1,4 @@
use crate::{healpix::moc::SpaceMoc, CameraViewPort, ShaderManager};
use crate::{healpix::coverage::HEALPixCoverage, CameraViewPort, ShaderManager};
use al_core::WebGlContext;
use wasm_bindgen::JsValue;
@@ -67,7 +67,7 @@ impl MOCRenderer {
pub fn push_back(
&mut self,
moc: SpaceMoc,
moc: HEALPixCoverage,
cfg: MOCOptions,
camera: &mut CameraViewPort,
proj: &ProjectionType,
@@ -80,7 +80,7 @@ impl MOCRenderer {
//self.layers.push(key);
}
pub fn get_hpx_coverage(&self, moc_uuid: &str) -> Option<&SpaceMoc> {
pub fn get_hpx_coverage(&self, moc_uuid: &str) -> Option<&HEALPixCoverage> {
if let Some(idx) = self.cfgs.iter().position(|cfg| cfg.get_uuid() == moc_uuid) {
Some(self.mocs[idx].get_full_moc())
} else {

View File

@@ -12,8 +12,9 @@ pub mod utils;
use crate::renderable::image::Image;
use crate::tile_fetcher::TileFetcherQueue;
use al_core::image::format::ChannelType;
use al_api::color::ColorRGB;
use al_api::hips::DataproductType;
use al_api::hips::HiPSCfg;
use al_api::hips::ImageMetadata;
use al_api::image::ImageParams;
@@ -21,7 +22,6 @@ use al_api::image::ImageParams;
use al_core::colormap::Colormaps;
use al_core::shader::Shader;
use al_core::texture::format::PixelType;
use al_core::VertexArrayObject;
use al_core::WebGlContext;
@@ -176,7 +176,7 @@ impl Layers {
pub fn set_hips_url(&mut self, cdid: &CreatorDid, new_url: String) -> Result<(), JsValue> {
if let Some(hips) = self.hipses.get_mut(cdid) {
// update the root_url
hips.set_root_url(new_url);
hips.get_config_mut().set_root_url(new_url.clone());
Ok(())
} else {
@@ -214,13 +214,9 @@ impl Layers {
let raytracer = &self.raytracer;
let raytracing = camera.is_raytracing(projection);
// The first layer or the background must be plot with no blending
self.gl.disable(WebGl2RenderingContext::BLEND);
// Check whether a hips to plot is allsky
// if neither are, we draw a font
// if there are, we do not draw nothing
let mut idx_start_layer = -1;
for (idx, layer) in self.layers.iter().enumerate() {
@@ -230,8 +226,8 @@ impl Layers {
if let Some(hips) = self.hipses.get(cdid) {
// Check if a HiPS is fully opaque so that we cannot see the background
// In that case, no need to draw a background because a HiPS will fully cover it
let full_covering_hips = (hips.get_config().get_format().get_pixel_format()
== PixelType::RGB8U
let full_covering_hips = (hips.get_config().get_format().get_channel()
== ChannelType::RGB8U
|| hips.is_allsky())
&& meta.opacity == 1.0;
if full_covering_hips {
@@ -240,8 +236,6 @@ impl Layers {
}
}
let mut blending_enabled = false;
// Need to render transparency font
if idx_start_layer == -1 {
let vao = if raytracing {
@@ -265,9 +259,6 @@ impl Layers {
// The background (index -1) has been drawn, we can draw the first HiPS
idx_start_layer = 0;
self.gl.enable(WebGl2RenderingContext::BLEND);
blending_enabled = true;
}
let layers_to_render = &self.layers[(idx_start_layer as usize)..];
@@ -293,11 +284,6 @@ impl Layers {
}
}
}
if !blending_enabled {
self.gl.enable(WebGl2RenderingContext::BLEND);
blending_enabled = true;
}
}
Ok(())
@@ -310,8 +296,10 @@ impl Layers {
proj: &ProjectionType,
tile_fetcher: &mut TileFetcherQueue,
) -> Result<usize, JsValue> {
let err_layer_not_found =
JsValue::from_str(&format!("Layer {layer:?} not found, so cannot be removed."));
let err_layer_not_found = JsValue::from_str(&format!(
"Layer {:?} not found, so cannot be removed.",
layer
));
// Color configs, and urls are indexed by layer
self.meta.remove(layer).ok_or(err_layer_not_found.clone())?;
let id = self.ids.remove(layer).ok_or(err_layer_not_found.clone())?;
@@ -345,15 +333,18 @@ impl Layers {
Ok(id_layer)
} else {
Err(JsValue::from_str(&format!(
"Url found {id:?} is associated to no 2D HiPSes."
"Url found {:?} is associated to no 2D HiPSes.",
id
)))
}
}
}
pub fn rename_layer(&mut self, layer: &str, new_layer: &str) -> Result<(), JsValue> {
let err_layer_not_found =
JsValue::from_str(&format!("Layer {layer:?} not found, so cannot be removed."));
let err_layer_not_found = JsValue::from_str(&format!(
"Layer {:?} not found, so cannot be removed.",
layer
));
// layer from layers does also need to be removed
let id_layer = self
@@ -380,14 +371,16 @@ impl Layers {
.iter()
.position(|l| l == first_layer)
.ok_or(JsValue::from_str(&format!(
"Layer {first_layer:?} not found, so cannot be removed."
"Layer {:?} not found, so cannot be removed.",
first_layer
)))?;
let id_second_layer =
self.layers
.iter()
.position(|l| l == second_layer)
.ok_or(JsValue::from_str(&format!(
"Layer {second_layer:?} not found, so cannot be removed.",
"Layer {:?} not found, so cannot be removed.",
second_layer
)))?;
self.layers.swap(id_first_layer, id_second_layer);
@@ -450,13 +443,11 @@ impl Layers {
}*/
camera.register_view_frame(cfg.get_frame(), proj);
let hips = match &cfg.dataproduct_type {
let hips = if cfg.get_cube_depth().is_some() {
// HiPS cube
DataproductType::Cube => HiPS::D3(HiPS3D::new(cfg, gl, &layer)?),
// HiPS 3D
DataproductType::SpectralCube => HiPS::D3(HiPS3D::new(cfg, gl, &layer)?),
// Typical HiPS image
_ => HiPS::D2(HiPS2D::new(cfg, gl)?),
HiPS::D3(HiPS3D::new(cfg, gl)?)
} else {
HiPS::D2(HiPS2D::new(cfg, gl)?)
};
// add the frame to the camera
@@ -507,6 +498,17 @@ impl Layers {
let fits_already_found = self.images.keys().any(|image_id| image_id == &id);
if !fits_already_found {
// The fits has not been loaded yet
/*if let Some(initial_ra) = properties.get_initial_ra() {
if let Some(initial_dec) = properties.get_initial_dec() {
camera.set_center::<P>(&LonLatT::new(Angle((initial_ra).to_radians()), Angle((initial_dec).to_radians())), &properties.get_frame());
}
}
if let Some(initial_fov) = properties.get_initial_fov() {
camera.set_aperture::<P>(Angle((initial_fov).to_radians()));
}*/
self.images.insert(id.clone(), images);
}
@@ -529,7 +531,7 @@ impl Layers {
pub fn set_layer_cfg(&mut self, layer: String, meta: ImageMetadata) -> Result<(), JsValue> {
// Expect the image hips to be found in the hash map
self.meta.insert(layer.clone(), meta).ok_or_else(|| {
JsValue::from(js_sys::Error::new(&format!("{layer:?} layer not found")))
JsValue::from(js_sys::Error::new(&format!("{:?} layer not found", layer)))
})?;
Ok(())

View File

@@ -26,13 +26,13 @@ impl From<Error> for JsValue {
fn from(e: Error) -> Self {
match e {
Error::ShaderAlreadyInserted { message } => {
JsValue::from_str(&format!("Shader already inserted: {message:?}"))
JsValue::from_str(&format!("Shader already inserted: {:?}", message))
}
Error::ShaderNotFound { message } => {
JsValue::from_str(&format!("Shader not found: {message:?}"))
JsValue::from_str(&format!("Shader not found: {:?}", message))
}
Error::FileNotFound { message } => {
JsValue::from_str(&format!("Shader not found: {message:?}"))
JsValue::from_str(&format!("Shader not found: {:?}", message))
}
Error::ShaderCompilingLinking { message } => message,
Error::Io { message } => message.into(),

View File

@@ -1,4 +1,3 @@
use crate::downloader::query::CellDesc;
use crate::downloader::{query, Downloader};
use crate::time::{DeltaTime, Time};
use crate::Abort;
@@ -73,10 +72,8 @@ impl HiPSLocalFiles {
tiles_per_fmt[d].get(&i)
}
}
impl HiPSLocalFiles {
pub fn get_moc(&self) -> &web_sys::File {
fn get_moc(&self) -> &web_sys::File {
&self.moc
}
}
@@ -147,23 +144,16 @@ impl TileFetcherQueue {
fn check_in_file_list(&self, mut query: Tile) -> Result<Tile, JsValue> {
if let Some(local_hips) = self.hips_local_files.get(&query.hips_cdid) {
// TODO modify local hips file structure to support freq indices as well
match query.cell {
CellDesc::HiPS2D { cell, .. } => {
if let Some(tile) = local_hips.get_tile(&cell, *query.format.get_ext_file()) {
if let Ok(url) = web_sys::Url::create_object_url_with_blob(tile.as_ref()) {
// rewrite the url
query.url = url;
Ok(query)
} else {
Err(JsValue::from_str("could not create an url from the tile"))
}
} else {
Ok(query)
}
if let Some(tile) = local_hips.get_tile(&query.cell, *query.format.get_ext_file()) {
if let Ok(url) = web_sys::Url::create_object_url_with_blob(tile.as_ref()) {
// rewrite the url
query.url = url;
Ok(query)
} else {
Err(JsValue::from_str("could not create an url from the tile"))
}
// TODO Support for HiPS3D/Cube
_ => Ok(query),
} else {
Ok(query)
}
} else {
Ok(query)
@@ -199,10 +189,28 @@ impl TileFetcherQueue {
downloader: Rc<RefCell<Downloader>>,
) {
let cfg = hips.get_config();
// Request for the allsky first
// The allsky is not mandatory present in a HiPS service but it is better to first try to search for it
//downloader.fetch(query::PixelMetadata::new(cfg));
// Try to fetch the MOC
let hips_cdid = cfg.get_creator_did();
let moc_url = if let Some(local_hips) = self.hips_local_files.get(hips_cdid) {
if let Ok(url) =
web_sys::Url::create_object_url_with_blob(local_hips.get_moc().as_ref())
{
url
} else {
format!("{}/Moc.fits", cfg.get_root_url())
}
} else {
format!("{}/Moc.fits", cfg.get_root_url())
};
downloader.borrow_mut().fetch(query::Moc::new(
cfg,
&self.hips_local_files,
moc_url,
cfg.get_request_mode(),
cfg.get_request_credentials(),
cfg.get_creator_did().to_string(),
MOCOptions::default(),
));
@@ -211,26 +219,24 @@ impl TileFetcherQueue {
// Request the allsky
let dl = downloader.clone();
// Allsky query
match hips {
HiPS::D2(_) => {
let allsky_query = query::Allsky::new(cfg, None);
let allsky_query = query::Allsky::new(
cfg,
match hips {
HiPS::D2(_) => None,
HiPS::D3(h) => Some(h.get_slice() as u32),
},
);
crate::utils::set_timeout(
move || {
dl.borrow_mut().fetch(allsky_query);
},
100,
);
}
// Do not ask for allsky for HiPS3D
HiPS::D3(_) => (),
}
crate::utils::set_timeout(
move || {
dl.borrow_mut().fetch(allsky_query);
},
100,
);
// FIXME: this still might be important to keep but for HiPS2D only
/*if cfg.get_min_depth_tile() == 0 {
if cfg.get_min_depth_tile() == 0 {
for tile_cell in crate::healpix::cell::ALLSKY_HPX_CELLS_D0 {
if let Ok(query) = self.check_in_file_list(hips.build_tile_query(tile_cell)) {
if let Ok(query) = self.check_in_file_list(hips.get_tile_query(tile_cell)) {
let dl = downloader.clone();
crate::utils::set_timeout(
@@ -241,6 +247,6 @@ impl TileFetcherQueue {
);
}
}
}*/
}
}
}

View File

@@ -12,7 +12,7 @@ impl Time {
let r = f()?;
let duration = Time::now() - start_time;
// print the duration in the console
al_core::log(&format!("{label:?} time: {duration:?}"));
al_core::log(&format!("{:?} time: {:?}", label, duration));
Ok(r)
}

View File

@@ -23,27 +23,6 @@
color: white;
}
.aladin-lite-spectra-displayer .aladin-spectra-unit-selector {
position: absolute;
bottom: 3rem;
right: 0;
}
.aladin-lite-spectra-displayer .aladin-spectra-home {
position: absolute;
bottom: 5rem;
right: 0;
}
.aladin-lite-spectra-displayer .aladin-spectra-extraction {
position: absolute;
bottom: 7rem;
right: 0;
}
.aladin-lite-spectra-displayer .aladin-spectra-hips-selector {
position: absolute;
top: 0;
left: 0;
max-width: 10rem;
}
.aladin-imageCanvas {
position: absolute;
@@ -867,6 +846,7 @@
padding: 0.2rem 0;
font-size: inherit;
border-radius: 0.2rem;
box-shadow: 0 0 1em 0 rgba(0, 0, 0, 0.2);
cursor: pointer;
font-family: monospace;
box-sizing: content-box;

View File

@@ -1,45 +0,0 @@
// Utils methods for decoding texture bytes to f32, i32, i16, u8
highp float decode_f32(highp vec4 rgba) {
highp float Sign = 1.0 - step(128.0,rgba[0])*2.0;
highp float Exponent = 2.0 * mod(rgba[0],128.0) + step(128.0,rgba[1]) - 127.0;
if (abs(Exponent + 127.0) < 1e-3) {
return 0.0;
}
highp float Mantissa = mod(rgba[1],128.0)*65536.0 + rgba[2]*256.0 +rgba[3] + float(0x800000);
highp float Result = Sign * exp2(Exponent) * (Mantissa * exp2(-23.0 ));
return Result;
}
int decode_i32(vec4 rgba) {
int r = int(rgba.r * 255.0 + 0.5);
int g = int(rgba.g * 255.0 + 0.5);
int b = int(rgba.b * 255.0 + 0.5);
int a = int(rgba.a * 255.0 + 0.5);
// GLSL int automatically handle the top-most sign bit (two's complement behaviour)
int value = (r << 24) | (g << 16) | (b << 8) | a; // Combine into a 16-bit integer
return value;
}
int decode_i16(vec2 rg) {
int r = int(rg.r * 255.0 + 0.5);
int g = int(rg.g * 255.0 + 0.5);
int value = (r << 8) | g; // Combine into a 16-bit integer
// Convert from unsigned to signed 16-bit
if (value >= 32768) {
value -= 65536;
}
return value;
}
uint decode_u8(float r) {
uint value = uint(r * 255.0 + 0.5);
return value;
}

View File

@@ -1,57 +0,0 @@
uniform float scale;
uniform float offset;
uniform float blank;
uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;
#include ../colormaps/colormap.glsl;
#include ../transfer_funcs.glsl;
#include ../tonal_corrections.glsl;
#include ../decode.glsl;
/////////////////////////////////////////////
/// FITS sampler
vec4 val2c_f32(float x) {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(isinf(x)));
return apply_tonal(new_color);
}
vec4 val2c(float x) {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(x == blank || isnan(x)));
return apply_tonal(new_color);
}
vec4 uv2c_f32(vec2 uv) {
float val = decode_f32(texture(tex, uv).rgba*255.0);
return val2c_f32(val);
}
vec4 uv2c_i32(vec2 uv) {
float val = float(decode_i32(texture(tex, uv).rgba));
return val2c(val);
}
vec4 uv2c_i16(vec2 uv) {
float val = float(decode_i16(texture(tex, uv).rg));
return val2c(val);
}
vec4 uv2c_u8(vec2 uv) {
float val = float(decode_u8(texture(tex, uv).r));
return val2c(val);
}

View File

@@ -1,21 +0,0 @@
#version 300 es
precision highp float;
precision highp sampler2D;
precision highp int;
out vec4 out_frag_color;
in vec2 frag_uv;
uniform sampler2D tex;
uniform float opacity;
#include ./color.glsl;
void main() {
// FITS y axis looks down
vec2 uv = frag_uv;
uv.y = 1.0 - uv.y;
out_frag_color = uv2c_f32(frag_uv);
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -1,21 +0,0 @@
#version 300 es
precision lowp float;
precision lowp sampler2D;
precision mediump int;
out vec4 out_frag_color;
in vec2 frag_uv;
uniform sampler2D tex;
uniform float opacity;
#include ./color.glsl;
void main() {
// FITS y axis looks down
vec2 uv = frag_uv;
uv.y = 1.0 - uv.y;
out_frag_color = uv2c_i16(frag_uv);
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -1,21 +0,0 @@
#version 300 es
precision lowp float;
precision lowp sampler2D;
precision mediump int;
out vec4 out_frag_color;
in vec2 frag_uv;
uniform sampler2D tex;
uniform float opacity;
#include ./color.glsl;
void main() {
// FITS y axis looks down
vec2 uv = frag_uv;
uv.y = 1.0 - uv.y;
out_frag_color = uv2c_i32(frag_uv);
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -0,0 +1,43 @@
#version 300 es
precision lowp float;
precision lowp sampler2D;
precision lowp isampler2D;
precision lowp usampler2D;
precision mediump int;
out vec4 out_frag_color;
in vec2 frag_uv;
uniform isampler2D tex;
uniform float opacity;
uniform float scale;
uniform float offset;
uniform float blank;
uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;
#include ./../colormaps/colormap.glsl;
#include ./../hips/transfer_funcs.glsl;
#include ./../hips/tonal_corrections.glsl;
vec4 apply_colormap_to_grayscale(float x, float a) {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha) * a, vec4(0.0), float(x == blank || isnan(x)));
return apply_tonal(new_color);
}
void main() {
ivec4 color = texture(tex, frag_uv);
out_frag_color = apply_colormap_to_grayscale(float(color.r), float(color.a));
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -0,0 +1,55 @@
#version 300 es
precision highp float;
precision highp sampler2D;
precision lowp isampler2D;
precision lowp usampler2D;
precision highp int;
out vec4 out_frag_color;
in vec2 frag_uv;
uniform sampler2D tex;
uniform float opacity;
uniform float scale;
uniform float offset;
uniform float blank;
uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;
#include ./../colormaps/colormap.glsl;
#include ./../hips/transfer_funcs.glsl;
#include ./../hips/tonal_corrections.glsl;
vec4 apply_colormap_to_grayscale(float x) {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(isinf(x)));
return apply_tonal(new_color);
}
highp float decode32(highp vec4 rgba) {
highp float Sign = 1.0 - step(128.0,rgba[0])*2.0;
highp float Exponent = 2.0 * mod(rgba[0],128.0) + step(128.0,rgba[1]) - 127.0;
if (abs(Exponent + 127.0) < 1e-3) {
return 0.0;
}
highp float Mantissa = mod(rgba[1],128.0)*65536.0 + rgba[2]*256.0 +rgba[3] + float(0x800000);
highp float Result = Sign * exp2(Exponent) * (Mantissa * exp2(-23.0 ));
return Result;
}
void main() {
highp float value = decode32(texture(tex, frag_uv).abgr*255.0);
// reconstruct the float value
out_frag_color = apply_colormap_to_grayscale(value);
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -1,21 +0,0 @@
#version 300 es
precision lowp float;
precision lowp sampler2D;
precision mediump int;
out vec4 out_frag_color;
in vec2 frag_uv;
uniform sampler2D tex;
uniform float opacity;
#include ./color.glsl;
void main() {
// FITS y axis looks down
vec2 uv = frag_uv;
uv.y = 1.0 - uv.y;
out_frag_color = uv2c_u8(frag_uv);
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -0,0 +1,43 @@
#version 300 es
precision lowp float;
precision lowp sampler2D;
precision lowp isampler2D;
precision lowp usampler2D;
precision mediump int;
out vec4 out_frag_color;
in vec2 frag_uv;
uniform usampler2D tex;
uniform float opacity;
uniform float scale;
uniform float offset;
uniform float blank;
uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;
#include ./../colormaps/colormap.glsl;
#include ./../hips/transfer_funcs.glsl;
#include ./../hips/tonal_corrections.glsl;
vec4 apply_colormap_to_grayscale(float x, float a) {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha) * a, vec4(0.0), float(x == blank || isnan(x)));
return apply_tonal(new_color);
}
void main() {
uvec4 color = texture(tex, frag_uv);
out_frag_color = apply_colormap_to_grayscale(float(color.r), float(color.a));
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -5,106 +5,67 @@ uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;
uniform int tex_storing_fits;
#include ../colormaps/colormap.glsl;
#include ../transfer_funcs.glsl;
#include ../tonal_corrections.glsl;
#include ../hsv.glsl;
#include ../decode.glsl;
#include ./transfer_funcs.glsl;
#include ./tonal_corrections.glsl;
#include ./hsv.glsl;
/////////////////////////////////////////////
/// RED sampler
vec4 uvw2c_r(vec3 uv) {
vec2 va = texture(tex, uv).ra;
va.x = transfer_func(H, va.x, min_value, max_value);
// apply reversed
va.x = mix(va.x, 1.0 - va.x, reversed);
vec4 c = colormap_f(va.x);
return apply_tonal(c);
vec4 get_pixels(vec3 uv) {
return texture(tex, uv);
}
/// RGBA sampler
vec4 uvw2c_rgba(vec3 uv) {
vec4 c = texture(tex, uv).rgba;
c.r = transfer_func(H, c.r, min_value, max_value);
c.g = transfer_func(H, c.g, min_value, max_value);
c.b = transfer_func(H, c.b, min_value, max_value);
// apply reversed
c.rgb = mix(c.rgb, 1.0 - c.rgb, reversed);
return apply_tonal(c);
vec3 reverse_uv(vec3 uv) {
uv.y = 1.0 - uv.y;
return uv;
}
vec4 uvw2c_ra(vec3 uv) {
vec2 c = texture(tex, uv).rg;
c.r = transfer_func(H, c.r, min_value, max_value);
vec4 apply_color_settings(vec4 color) {
color.r = transfer_func(H, color.r, min_value, max_value);
color.g = transfer_func(H, color.g, min_value, max_value);
color.b = transfer_func(H, color.b, min_value, max_value);
// apply reversed
c.r = mix(c.r, 1.0 - c.r, reversed);
color.rgb = mix(color.rgb, 1.0 - color.rgb, reversed);
vec3 color = colormap_f(c.r).rgb;
return apply_tonal(vec4(color, c.g));
return apply_tonal(color);
}
vec4 uvw2cmap_rgba(vec3 uv) {
float v = texture(tex, uv).r;
// apply transfer f
v = transfer_func(H, v, min_value, max_value);
// apply cmap
vec4 c = colormap_f(v);
// apply reversed
c.rgb = mix(c.rgb, 1.0 - c.rgb, reversed);
return apply_tonal(c);
vec4 get_color_from_texture(vec3 UV) {
vec4 color = get_pixels(UV);
return apply_color_settings(color);
}
/////////////////////////////////////////////
/// FITS sampler
vec4 val2c_f32(float x) {
vec4 apply_colormap_to_grayscale(float x) {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(isinf(x)));
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(isinf(x) || isnan(x)));
return apply_tonal(new_color);
}
vec4 val2c(float x) {
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(x == blank || isnan(x)));
return apply_tonal(new_color);
highp float decode32(highp vec4 rgba) {
highp float Sign = 1.0 - step(128.0,rgba[0])*2.0;
highp float Exponent = 2.0 * mod(rgba[0],128.0) + step(128.0,rgba[1]) - 127.0;
highp float Mantissa = mod(rgba[1],128.0)*65536.0 + rgba[2]*256.0 +rgba[3] + float(0x800000);
highp float Result = Sign * exp2(Exponent) * (Mantissa * exp2(-23.0 ));
return Result;
}
vec4 uvw2c_f32(vec3 uv) {
float val = decode_f32(texture(tex, uv).rgba*255.0);
return val2c_f32(val);
vec4 get_colormap_from_grayscale_texture(vec3 UV) {
// FITS data pixels are reversed along the y axis
vec3 uv = mix(UV, reverse_uv(UV), float(tex_storing_fits == 1));
float value = decode32(get_pixels(uv).abgr*255.0);
return apply_colormap_to_grayscale(value);
}
vec4 uvw2c_i32(vec3 uv) {
float val = float(decode_i32(texture(tex, uv).rgba));
return val2c(val);
}
vec4 uvw2c_i16(vec3 uv) {
float val = float(decode_i16(texture(tex, uv).rg));
return val2c(val);
}
vec4 uvw2c_u8(vec3 uv) {
float val = float(decode_u8(texture(tex, uv).r));
return val2c(val);
}
vec4 get_colormap_from_color_texture(vec3 uv) {
float value = get_pixels(uv).r;
return apply_colormap_to_grayscale(value);
}

View File

@@ -0,0 +1,39 @@
uniform float scale;
uniform float offset;
uniform float blank;
uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;
uniform int tex_storing_fits;
#include ../colormaps/colormap.glsl;
#include ./transfer_funcs.glsl;
#include ./tonal_corrections.glsl;
ivec4 get_pixels(vec3 uv) {
return ivec4(texture(tex, uv));
}
vec3 reverse_uv(vec3 uv) {
uv.y = 1.0 - uv.y;
return uv;
}
vec4 get_colormap_from_grayscale_texture(vec3 UV) {
// FITS data pixels are reversed along the y axis
vec3 uv = mix(UV, reverse_uv(UV), float(tex_storing_fits == 1));
float x = float(get_pixels(uv).r);
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(x == blank));
return apply_tonal(new_color);
}

View File

@@ -0,0 +1,38 @@
uniform float scale;
uniform float offset;
uniform float blank;
uniform float min_value;
uniform float max_value;
uniform int H;
uniform float reversed;
uniform int tex_storing_fits;
#include ../colormaps/colormap.glsl;
#include ./transfer_funcs.glsl;
#include ./tonal_corrections.glsl;
uvec4 get_pixels(vec3 uv) {
return uvec4(texture(tex, uv));
}
vec3 reverse_uv(vec3 uv) {
uv.y = 1.0 - uv.y;
return uv;
}
vec4 get_colormap_from_grayscale_texture(vec3 UV) {
// FITS data pixels are reversed along the y axis
vec3 uv = mix(UV, reverse_uv(UV), float(tex_storing_fits == 1));
float x = float(get_pixels(uv).r);
float alpha = x * scale + offset;
alpha = transfer_func(H, alpha, min_value, max_value);
// apply reversed
alpha = mix(alpha, 1.0 - alpha, reversed);
vec4 new_color = mix(colormap_f(alpha), vec4(0.0), float(x == blank));
return apply_tonal(new_color);
}

View File

@@ -1,6 +1,8 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;
uniform sampler2DArray tex;
@@ -14,8 +16,8 @@ uniform float opacity;
#include ../color.glsl;
void main() {
vec4 color_start = uvw2cmap_rgba(frag_uv_start);
vec4 color_end = uvw2cmap_rgba(frag_uv_end);
vec4 color_start = get_color_from_texture(frag_uv_start);
vec4 color_end = get_color_from_texture(frag_uv_end);
out_frag_color = mix(color_start, color_end, frag_blending_factor);
out_frag_color.a = opacity * out_frag_color.a;

View File

@@ -1,6 +1,8 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;
uniform sampler2DArray tex;
@@ -14,8 +16,8 @@ uniform float opacity;
#include ../color.glsl;
void main() {
vec4 color_start = uvw2c_rgba(frag_uv_start);
vec4 color_end = uvw2c_rgba(frag_uv_end);
vec4 color_start = get_colormap_from_color_texture(frag_uv_start);
vec4 color_end = get_colormap_from_color_texture(frag_uv_end);
out_frag_color = mix(color_start, color_end, frag_blending_factor);
out_frag_color.a = opacity * out_frag_color.a;

View File

@@ -1,29 +0,0 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
uniform sampler2DArray tex;
in vec3 frag_uv_start;
in vec3 frag_uv_end;
in float frag_blending_factor;
out vec4 out_frag_color;
#include ../color.glsl;
uniform float opacity;
void main() {
// FITS data pixels are reversed along the y axis
vec3 uv0 = frag_uv_start;
vec3 uv1 = frag_uv_end;
uv0.y = 1.0 - uv0.y;
uv1.y = 1.0 - uv1.y;
vec4 color_start = uvw2c_f32(uv0);
vec4 color_end = uvw2c_f32(uv1);
out_frag_color = mix(color_start, color_end, frag_blending_factor);
out_frag_color.a = out_frag_color.a * opacity;
}

View File

@@ -1,6 +1,8 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;
uniform sampler2DArray tex;
@@ -15,14 +17,8 @@ out vec4 out_frag_color;
uniform float opacity;
void main() {
// FITS data pixels are reversed along the y axis
vec3 uv0 = frag_uv_start;
vec3 uv1 = frag_uv_end;
uv0.y = 1.0 - uv0.y;
uv1.y = 1.0 - uv1.y;
vec4 color_start = uvw2c_u8(uv0);
vec4 color_end = uvw2c_u8(uv1);
vec4 color_start = get_colormap_from_grayscale_texture(frag_uv_start);
vec4 color_end = get_colormap_from_grayscale_texture(frag_uv_end);
out_frag_color = mix(color_start, color_end, frag_blending_factor);
out_frag_color.a = out_frag_color.a * opacity;

View File

@@ -1,8 +1,10 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;
uniform sampler2DArray tex;
uniform isampler2DArray tex;
in vec3 frag_uv_start;
in vec3 frag_uv_end;
@@ -10,19 +12,13 @@ in float frag_blending_factor;
out vec4 out_frag_color;
#include ../color.glsl;
#include ../color_i.glsl;
uniform float opacity;
void main() {
// FITS data pixels are reversed along the y axis
vec3 uv0 = frag_uv_start;
vec3 uv1 = frag_uv_end;
uv0.y = 1.0 - uv0.y;
uv1.y = 1.0 - uv1.y;
vec4 color_start = uvw2c_i16(uv0);
vec4 color_end = uvw2c_i16(uv1);
vec4 color_start = get_colormap_from_grayscale_texture(frag_uv_start);
vec4 color_end = get_colormap_from_grayscale_texture(frag_uv_end);
out_frag_color = mix(color_start, color_end, frag_blending_factor);
out_frag_color.a = out_frag_color.a * opacity;

View File

@@ -1,8 +1,10 @@
#version 300 es
precision lowp float;
precision lowp sampler2DArray;
precision lowp isampler2DArray;
precision lowp usampler2DArray;
uniform sampler2DArray tex;
uniform usampler2DArray tex;
in vec3 frag_uv_start;
in vec3 frag_uv_end;
@@ -10,19 +12,13 @@ in float frag_blending_factor;
out vec4 out_frag_color;
#include ../color.glsl;
#include ../color_u.glsl;
uniform float opacity;
void main() {
// FITS data pixels are reversed along the y axis
vec3 uv0 = frag_uv_start;
vec3 uv1 = frag_uv_end;
uv0.y = 1.0 - uv0.y;
uv1.y = 1.0 - uv1.y;
vec4 color_start = uvw2c_i32(uv0);
vec4 color_end = uvw2c_i32(uv1);
vec4 color_start = get_colormap_from_grayscale_texture(frag_uv_start);
vec4 color_end = get_colormap_from_grayscale_texture(frag_uv_end);
out_frag_color = mix(color_start, color_end, frag_blending_factor);
out_frag_color.a = out_frag_color.a * opacity;

View File

@@ -2,6 +2,7 @@
precision highp float;
layout (location = 0) in vec3 xyz;
//layout (location = 0) in vec2 lonlat;
layout (location = 1) in vec3 uv_start;
layout (location = 2) in vec3 uv_end;
layout (location = 3) in float time_tile_received;

Some files were not shown because too many files have changed in this diff Show More