import numpy as np


def idl_lt(arr, val):
    """
    The IDL "<" operator: elementwise minimum of arr and val.
    """
    if isinstance(arr, (list, np.ndarray)):
        # np.minimum broadcasts the scalar val against the array.
        arr = np.minimum(arr, val)
    elif isinstance(arr, (float, int)):
        arr = min(arr, val)
    return arr


def idl_gt(arr, val):
    """
    The IDL ">" operator: elementwise maximum of arr and val.
    """
    if isinstance(arr, (list, np.ndarray)):
        # np.maximum broadcasts the scalar val against the array.
        arr = np.maximum(arr, val)
    elif isinstance(arr, (float, int)):
        arr = max(arr, val)
    return arr


def getrms(fluxes, errors):
    """
    Keith Horne's method to estimate intrinsic variability.
    This is a modified version of getrms initially written by Jon Trump.

    V    = sum( (xi - u)^2 * gi^2 ) / sum(gi)
           intrinsic variance -- Eqn (8) of sample char paper
    sig0 = sqrt(V)
    xi   = flux measurement
    u    = mean flux with optimal estimator -- Eqn (9) of sample char paper;
           note there was a typo in the error on u in the original Eqn (9)
           of the paper (a missing square)
    gi   = 1 / (1 + e^2/V)
    e    = flux error

    Iterate to solve for V and u.
    Note that e should be a combination of the standard flux error plus
    the spectrophotometry error.

    Parameters
    ----------
    fluxes : array_like
        Input fluxes.
    errors : array_like
        Input flux errors.

    Returns
    -------
    u : float
        Mean flux with optimal estimator.
    uerror : float
        Error on u.
    sig0 : float
        Intrinsic variability.
    sig0err : float
        Error on sig0.
    """
    fluxes = np.asarray(fluxes, dtype=float)
    errors = np.asarray(errors, dtype=float)

    # No measurable variability (or too few points): return null results
    # for both u and sig0.
    if (np.std(fluxes) == 0) or (len(fluxes) <= 1):
        return 0, -1, 0, -1

    # Initial guesses: median flux, and the excess variance floored at 0.05.
    uguess = np.median(fluxes)
    varguess = idl_gt(np.std(fluxes)**2 - np.median(errors)**2, 0.05)

    # Iterate the optimally weighted estimates of V and u until convergence.
    while True:
        varold = varguess
        uold = uguess
        gi = 1 / (1 + errors**2/varold)
        varnew = np.sum((fluxes - uold)**2 * gi**2) / np.sum(gi)
        unew = np.sum(fluxes*gi) / np.sum(gi)
        varguess = varnew
        uguess = unew
        if (varnew < 1e-30) or (np.abs(varold - varnew)/varnew <= 1e-5):
            break

    # Error estimates evaluated at the converged solution.
    gi = 1 / (1 + errors**2/varnew)
    denom = np.sqrt(np.sum(gi)*np.sum((fluxes - unew)**2 * gi**3)
                    / np.sum((fluxes - unew)**2 * gi**2) - np.sum(gi**2)/2.)
    varerror = varnew / denom  # error on V itself (not returned)

    varnew_gt = idl_gt(varnew, 0)
    sig0 = np.sqrt(varnew_gt)
    sig0error = 0.5*np.sqrt(varnew_gt) / denom

    # There was a typo in the second half of Eqn (9) in the sample char
    # paper, i.e., a missing square sign.
    uerror = np.sqrt(varnew_gt/np.sum(gi))

    return unew, uerror, sig0, sig0error
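

# Minimal usage sketch (not part of the original module): simulate a light
# curve with a known intrinsic scatter plus measurement noise and check that
# getrms roughly recovers it. The variable names and numbers below (true_mean,
# true_sig0, meas_err, n_epochs, the random seed) are illustrative assumptions,
# not values from any particular dataset.
if __name__ == "__main__":
    rng = np.random.default_rng(42)

    true_mean = 10.0   # assumed mean flux (arbitrary units)
    true_sig0 = 0.5    # assumed intrinsic variability
    meas_err = 0.2     # assumed per-epoch measurement error
    n_epochs = 200

    # Simulated fluxes: intrinsic variability plus measurement noise.
    fluxes = (true_mean
              + rng.normal(0.0, true_sig0, n_epochs)
              + rng.normal(0.0, meas_err, n_epochs))
    errors = np.full(n_epochs, meas_err)

    u, uerr, sig0, sig0err = getrms(fluxes, errors)
    print(f"mean flux      = {u:.3f} +/- {uerr:.3f}")
    print(f"intrinsic sig0 = {sig0:.3f} +/- {sig0err:.3f}")

    # idl_gt / idl_lt mimic IDL's ">" and "<" operators, i.e. clipping
    # against a scalar floor / ceiling.
    print(idl_gt([1.0, -2.0, 3.0], 0.0))  # -> [1. 0. 3.]
    print(idl_lt(5.0, 2.0))               # -> 2.0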