cris_l1b · Commits · 84442b88

Commit 84442b88, authored Apr 18, 2022 by R.K.Garcia

Properly handle float32 based on source array or range
parent e622d9c9
Pipeline #36845 passed with stage in 7 minutes and 10 seconds
Changes: 1 · Pipelines: 1
pca/l1b-pcs/autoscale.py @ 84442b88
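The diff below teaches Autoscale to carry a dtype inferred from the source array (or from qmin/qmax when one of them carries a dtype), so float32 input yields float32 scale/offset attributes and float32 inverted values, with float64 as the fallback. A minimal sketch of the expected behaviour follows; the import path and sample values are illustrative assumptions, not taken from the repository:

# Illustrative sketch (not from the repository): exercising the dtype
# inference added in this commit. The import path is an assumption.
import numpy as np
from autoscale import Autoscale

# float32 source array: its dtype should propagate to scale/offset and invert()
q32 = np.linspace(200.0, 300.0, 100, dtype=np.float32).reshape(10, 10)
a = Autoscale(q32)
assert a.dtype == np.float32
assert a.attrs['scale'].dtype == np.float32
assert a.invert(a.convert(q32)).dtype == np.float32

# plain Python floats carry no dtype attribute, so the fallback is float64
b = Autoscale(qmin=0.0, qmax=1.0)
assert b.dtype == np.float64

Note that invert(..., ideal=True) uses the raw Python-float scale/offset rather than the dtype-cast values from attrs, which is why the updated test_autoscale uses it for the exact min/max checks.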
@@ -8,6 +8,14 @@ def nanrange(npa: np.ndarray) -> (float, float):
     return np.nanmin(q), np.nanmax(q)


+def _first_non_none_dtype(*seq):
+    for each in seq:
+        dt = getattr(each, 'dtype', None)
+        if dt is not None:
+            return dt
+    return None
+
+
 class Autoscale:
     vmin: int = None
     vmax: int = None
@@ -16,13 +24,17 @@ class Autoscale:
     scale: float = None
     offset: float = None
+    dtype = None

-    def __init__(self, data: np.ndarray = None, qmin: float = None, qmax: float = None,
+    def __init__(self, data: np.ndarray = None, qmin: float = None, qmax: float = None, qdtype=None,
                  vmin: int = -32767, vmax: int = 32767, vnan: int = -32768, vdtype=np.int16):
         self.vmin = vmin
         self.vmax = vmax
         self.vnan = vnan
         self.vdtype = vdtype
+        if qdtype is None:
+            qdtype = _first_non_none_dtype(data, qmax, qmin)
+        self.dtype = qdtype or np.float64
         if (data is not None) and (qmin is None or qmax is None):
             qrn, qrx = nanrange(data)
             qmin = qmin if (qmin is not None) else qrn
@@ -30,14 +42,22 @@ class Autoscale:
         if qmin == qmax or (qmin is None) or (qmax is None):
             raise ValueError(f"invalid range on input: {qmin} - {qmax}")
         # print(f"{qmin} - {qmax}, {vmin} - {vmax}")
-        self.scale = (qmax - qmin) / (self.vmax - self.vmin)
-        self.offset = self.vmin * -self.scale + qmin
+        self.scale = (float(qmax) - float(qmin)) / float(self.vmax - self.vmin)
+        self.offset = self.vmin * -self.scale + float(qmin)

-    def invert(self, scaled_value):
+    def invert(self, scaled_value, ideal=False):
         """ convert from scaled integer representation
         """
-        return np.where(self.vnan == scaled_value, np.NAN, self.scale * float(scaled_value) + self.offset)
+        if not ideal:
+            sa = self.attrs
+            scale = sa['scale']
+            offset = sa['offset']
+        else:
+            scale = self.scale
+            offset = self.offset
+        r = np.where(self.vnan == scaled_value, np.NAN, scale * scaled_value + offset)
+        return r.astype(self.dtype) if not ideal else r

     def convert(self, q: np.ndarray) -> np.ndarray:
         """ convert to scaled integer representation
@@ -54,14 +74,16 @@ class Autoscale:
     @property
     def attrs(self):
-        return {'scale': self.scale,
-                'offset': self.offset,
+        """ CF attributes and fill value
+        """
+        return {'scale': np.array(self.scale, dtype=self.dtype),
+                'offset': np.array(self.offset, dtype=self.dtype),
                 '_FillValue': self.vnan}


 def _rando(shape=(5, 5)):
-    r = np.random.random(shape)
+    r = np.random.random(shape).astype(np.float32)
     r[np.random.random(shape) < 0.1] = np.NAN
     return r
@@ -70,7 +92,10 @@ def test_autoscale(qin=None):
     q = qin or _rando()
     a = Autoscale(q)
     qs = a.convert(q)
-    assert np.isclose(a.invert(a.vmax), np.nanmax(q.ravel()))
-    assert np.isclose(a.invert(a.vmin), np.nanmin(q.ravel()))
+    assert np.isclose(a.invert(a.vmax, ideal=True), np.nanmax(q.ravel()))
+    assert np.isclose(a.invert(a.vmin, ideal=True), np.nanmin(q.ravel()))
     assert np.all((qs != a.vnan) == np.isfinite(q))
-    return a, q
+    qp = a.invert(a.convert(q))
+    dqp = np.nanmax(np.abs((qp - q).ravel()))
+    # print(f"max diff = {dqp}")
+    return a, q, dqp
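With this commit, attrs returns scale and offset as numpy scalars in the inferred dtype alongside _FillValue, matching the new "CF attributes and fill value" docstring. A hedged sketch of how the packed data and those attributes might be attached to a netCDF variable, assuming a netCDF4-based writer; the writer, file name, and variable name are illustrative and not part of this commit:

# Illustrative only: the output writer is not part of this commit.
import numpy as np
from netCDF4 import Dataset
from autoscale import Autoscale, _rando

q = _rando((16, 16))            # float32 field with a few NaNs
a = Autoscale(q)                # dtype inferred as float32
packed = a.convert(q)           # scaled integer representation, NaNs -> vnan

attrs = dict(a.attrs)           # {'scale', 'offset', '_FillValue'}
fill = attrs.pop('_FillValue')  # netCDF4 only accepts a fill value at creation time

with Dataset('example.nc', 'w') as nc:  # hypothetical output file
    nc.createDimension('y', q.shape[0])
    nc.createDimension('x', q.shape[1])
    v = nc.createVariable('pcs', np.int16, ('y', 'x'), fill_value=fill)
    v.setncatts(attrs)          # float32 scale/offset, per this commit
    v[:] = packed

Popping _FillValue before setncatts is deliberate: netCDF4 does not allow setting the fill value as an ordinary attribute after the variable has been created.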