INSTRUCTION | RESPONSE
|---|---|
Make the plot with photometry performance predictions.
|
def makePlot(args):
"""
Make the plot with photometry performance predictions.
:argument args: command line arguments
"""
gmag=np.linspace(3.0,20.0,171)
vmini = args['vmini']
vmag=gmag-gminvFromVmini(vmini)
if args['eom']:
sigmaG = gMagnitudeErrorEoM(gmag)
sigmaGBp = bpMagnitudeErrorEoM(gmag, vmini)
sigmaGRp = rpMagnitudeErrorEoM(gmag, vmini)
yminmax = (1.0e-4, 0.1)
else:
sigmaG = gMagnitudeError(gmag)
sigmaGBp = bpMagnitudeError(gmag, vmini)
sigmaGRp = rpMagnitudeError(gmag, vmini)
yminmax = (1.0e-4, 1)
fig=plt.figure(figsize=(10,6.5))
if (args['vmagAbscissa']):
plt.semilogy(vmag, sigmaG, 'k', label='$\\sigma_G$')
plt.semilogy(vmag, sigmaGBp, 'b', label='$\\sigma_{G_\\mathrm{BP}}$'+' for $(V-I)={0}$'.format(vmini))
plt.semilogy(vmag, sigmaGRp, 'r', label='$\\sigma_{G_\\mathrm{RP}}$'+' for $(V-I)={0}$'.format(vmini))
plt.xlim((6,20))
#plt.ylim(yminmax)
plt.legend(loc=0)
plt.xlabel('$V$ [mag]')
else:
ax=fig.add_subplot(111)
plt.semilogy(gmag, sigmaG, 'k', label='$\\sigma_G$')
plt.semilogy(gmag, sigmaGBp, 'b', label='$\\sigma_{G_\\mathrm{BP}}$'+' for $(V-I)={0}$'.format(vmini))
plt.semilogy(gmag, sigmaGRp, 'r', label='$\\sigma_{G_\\mathrm{RP}}$'+' for $(V-I)={0}$'.format(vmini))
plt.xlim((6,20))
#plt.ylim(yminmax)
plt.legend(loc=0)
plt.xlabel('$G$ [mag]')
plt.xticks(np.arange(6,20,2))
ax = plt.gca().yaxis
#ax.set_major_formatter(matplotlib.ticker.ScalarFormatter())
#plt.ticklabel_format(axis='y',style='plain')
plt.grid(which='both')
plt.ylabel('Photometric error [mag]')
if args['eom']:
plt.title('End-of-mission mean photometry: sky averaged errors for $(V-I)={0}$'.format(vmini), fontsize=14)
else:
plt.title('Single-FoV-transit photometry: sky averaged errors for $(V-I)={0}$'.format(vmini), fontsize=14)
basename = 'PhotometricErrors'
if (args['pdfOutput']):
plt.savefig(basename+'.pdf')
elif (args['pngOutput']):
plt.savefig(basename+'.png')
else:
plt.show()
|
Calculate the value for the parameter z in the formula for parallax and G magnitude errors as a function of G and (V-I).
|
def calcZ(G):
"""
Calculate the value for the parameter z in the formula for parallax and G magnitude errors as a
function of G and (V-I).
Parameters
----------
G - Value of G-band magnitude.
Returns
-------
Value of z.
"""
gatefloor=power(10.0,0.4*(12.0-15.0))
if isscalar(G):
result=amax((gatefloor,power(10.0,0.4*(G-15.0))))
else :
result=power(10.0,0.4*(G-15.0))
indices=(result<gatefloor)
result[indices]=gatefloor
return result
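A minimal usage sketch (hedged; it assumes numpy is imported as np and that calcZ is in scope as defined above):
import numpy as np
# Bright stars (G < 12) are clipped to the gate floor 10**(0.4*(12-15)) ~= 0.063;
# fainter stars follow 10**(0.4*(G-15)).
z_scalar = calcZ(10.0)                          # ~0.063 (gate floor)
z_array = calcZ(np.array([10.0, 15.0, 20.0]))   # ~[0.063, 1.0, 100.0]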
|
Calculate the value for the parameter z in the formula for the BP and RP magnitude errors as a function of G and (V-I).
|
def calcZBpRp(G):
"""
Calculate the value for the parameter z in the formula for the BP and RP magnitude errors as a
function of G and (V-I).
Parameters
----------
G - Value of G-band magnitude.
Returns
-------
Value of z for BP/RP.
"""
gatefloor=power(10.0,0.4*(11.0-15.0))
if isscalar(G):
result=amax((gatefloor,power(10.0,0.4*(G-15.0))))
else :
result=power(10.0,0.4*(G-15.0))
indices=(result<gatefloor)
result[indices]=gatefloor
return result
|
Calculate the value of z in the formula for the parallax errors. In this case assume gating starts at G=13.3 (to simulate bright-star worst performance).
|
def calcZAltStartGate(G):
"""
Calculate the value of z in the formula for the parallax errors. In this case assume gating starts at
G=13.3 (to simulate bright star worst performance)
Parameters
----------
G - Value of G-band magnitude.
Returns
-------
Value of z.
"""
gatefloor=power(10.0,0.4*(13.3-15.0))
if isscalar(G):
result=amax((gatefloor,power(10.0,0.4*(G-15.0))))
else :
result=power(10.0,0.4*(G-15.0))
indices=(result<gatefloor)
result[indices]=gatefloor
return result
|
Returns the number of transits across the Gaia focal plane averaged over ecliptic longitude.
|
def averageNumberOfTransits(beta):
"""
Returns the number of transits across the Gaia focal plane averaged over ecliptic longitude.
Parameters
----------
beta - Value(s) of the Ecliptic latitude.
Returns
-------
Average number of transits for the input values of beta.
"""
indices = array(floor(abs(sin(beta))*_numStepsSinBeta), dtype=int)
indices[(indices==_numStepsSinBeta)] = _numStepsSinBeta-1
return _averageTransitNumber[indices]
|
Calculate the angular distance between pairs of sky coordinates.
|
def angularDistance(phi1, theta1, phi2, theta2):
"""
Calculate the angular distance between pairs of sky coordinates.
Parameters
----------
phi1 : float
Longitude of first coordinate (radians).
theta1 : float
Latitude of first coordinate (radians).
phi2 : float
Longitude of second coordinate (radians).
theta2 : float
Latitude of second coordinate (radians).
Returns
-------
Angular distance in radians.
"""
# Formula below is more numerically stable than arccos( sin(theta1)*sin(theta2) +
# cos(phi2-phi1)*cos(theta1)*cos(theta2) )
# See: https://en.wikipedia.org/wiki/Great-circle_distance
    return arctan2( sqrt((cos(theta2)*sin(phi2-phi1))**2 +
        (cos(theta1)*sin(theta2)-sin(theta1)*cos(theta2)*cos(phi2-phi1))**2), sin(theta1)*sin(theta2) +
        cos(phi2-phi1)*cos(theta1)*cos(theta2) )
|
Rotates Cartesian coordinates from one reference system to another using the rotation matrix with which the class was initialized. The inputs can be scalars or 1 - dimensional numpy arrays.
|
def transformCartesianCoordinates(self, x, y, z):
"""
Rotates Cartesian coordinates from one reference system to another using the rotation matrix with
which the class was initialized. The inputs can be scalars or 1-dimensional numpy arrays.
Parameters
----------
x - Value of X-coordinate in original reference system
y - Value of Y-coordinate in original reference system
z - Value of Z-coordinate in original reference system
Returns
-------
xrot - Value of X-coordinate after rotation
yrot - Value of Y-coordinate after rotation
zrot - Value of Z-coordinate after rotation
"""
xrot, yrot, zrot = dot(self.rotationMatrix,[x,y,z])
return xrot, yrot, zrot
|
Converts sky coordinates from one reference system to another making use of the rotation matrix with which the class was initialized. Inputs can be scalars or 1 - dimensional numpy arrays.
|
def transformSkyCoordinates(self, phi, theta):
"""
Converts sky coordinates from one reference system to another, making use of the rotation matrix with
which the class was initialized. Inputs can be scalars or 1-dimensional numpy arrays.
Parameters
----------
phi - Value of the azimuthal angle (right ascension, longitude) in radians.
theta - Value of the elevation angle (declination, latitude) in radians.
Returns
-------
phirot - Value of the transformed azimuthal angle in radians.
thetarot - Value of the transformed elevation angle in radians.
"""
r=ones_like(phi)
x, y, z = sphericalToCartesian(r, phi, theta)
xrot, yrot, zrot = self.transformCartesianCoordinates(x, y, z)
r, phirot, thetarot = cartesianToSpherical(xrot, yrot, zrot)
return phirot, thetarot
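A hedged usage sketch; ct stands for an already-initialized instance of this transformation class (its constructor, which supplies the rotation matrix, is not shown in this entry):
from numpy import radians, degrees, array
phi = radians(array([0.0, 90.0]))      # longitude-like angles (radians)
theta = radians(array([0.0, 30.0]))    # latitude-like angles (radians)
phirot, thetarot = ct.transformSkyCoordinates(phi, theta)
print(degrees(phirot), degrees(thetarot))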
|
Converts proper motions from one reference system to another using the prescriptions in section 1.5.3 of the Hipparcos Explanatory Volume 1 (equations 1.5.18, 1.5.19).
|
def transformProperMotions(self, phi, theta, muphistar, mutheta):
"""
Converts proper motions from one reference system to another, using the prescriptions in section
1.5.3 of the Hipparcos Explanatory Volume 1 (equations 1.5.18, 1.5.19).
Parameters
----------
phi - The longitude-like angle of the position of the source (radians).
theta - The latitude-like angle of the position of the source (radians).
muphistar - Value of the proper motion in the longitude-like angle, multiplied by cos(latitude).
mutheta - Value of the proper motion in the latitude-like angle.
Returns
-------
muphistarrot - Value of the transformed proper motion in the longitude-like angle (including the
cos(latitude) factor).
muthetarot - Value of the transformed proper motion in the latitude-like angle.
"""
c, s = self._getJacobian(phi,theta)
return c*muphistar+s*mutheta, c*mutheta-s*muphistar
|
Converts the sky coordinate errors from one reference system to another, including the covariance term. Equations (1.5.4) and (1.5.20) from section 1.5 in the Hipparcos Explanatory Volume 1 are used.
|
def transformSkyCoordinateErrors(self, phi, theta, sigPhiStar, sigTheta, rhoPhiTheta=0):
"""
Converts the sky coordinate errors from one reference system to another, including the covariance
term. Equations (1.5.4) and (1.5.20) from section 1.5 in the Hipparcos Explanatory Volume 1 are used.
Parameters
----------
phi - The longitude-like angle of the position of the source (radians).
theta - The latitude-like angle of the position of the source (radians).
sigPhiStar - Standard error in the longitude-like angle of the position of the source (radians or
sexagesimal units, including cos(latitude) term)
sigTheta - Standard error in the latitude-like angle of the position of the source (radians or
sexagesimal units)
Keywords (optional)
-------------------
rhoPhiTheta - Correlation coefficient of the position errors. Set to zero if this keyword is not
provided.
Returns
------
sigPhiRotStar - The transformed standard error in the longitude-like angle (including
cos(latitude) factor)
sigThetaRot - The transformed standard error in the latitude-like angle.
rhoPhiThetaRot - The transformed correlation coefficient.
"""
if isscalar(rhoPhiTheta) and not isscalar(sigTheta):
rhoPhiTheta=zeros_like(sigTheta)+rhoPhiTheta
c, s = self._getJacobian(phi,theta)
cSqr = c*c
sSqr = s*s
covar = sigPhiStar*sigTheta*rhoPhiTheta
varPhiStar = sigPhiStar*sigPhiStar
varTheta = sigTheta*sigTheta
varPhiStarRot = cSqr*varPhiStar+sSqr*varTheta+2.0*covar*c*s
varThetaRot = sSqr*varPhiStar+cSqr*varTheta-2.0*covar*c*s
covarRot = (cSqr-sSqr)*covar+c*s*(varTheta-varPhiStar)
return sqrt(varPhiStarRot), sqrt(varThetaRot), covarRot/sqrt(varPhiStarRot*varThetaRot)
|
Converts the proper motion errors from one reference system to another, including the covariance term. Equations (1.5.4) and (1.5.20) from section 1.5 in the Hipparcos Explanatory Volume 1 are used.
|
def transformProperMotionErrors(self, phi, theta, sigMuPhiStar, sigMuTheta, rhoMuPhiMuTheta=0):
"""
Converts the proper motion errors from one reference system to another, including the covariance
term. Equations (1.5.4) and (1.5.20) from section 1.5 in the Hipparcos Explanatory Volume 1 are used.
Parameters
----------
phi - The longitude-like angle of the position of the source (radians).
theta - The latitude-like angle of the position of the source (radians).
sigMuPhiStar - Standard error in the proper motion in the longitude-like direction (including
cos(latitude) factor).
sigMuTheta - Standard error in the proper motion in the latitude-like direction.
Keywords (optional)
-------------------
rhoMuPhiMuTheta - Correlation coefficient of the proper motion errors. Set to zero if this
keyword is not provided.
Returns
------
sigMuPhiRotStar - The transformed standard error in the proper motion in the longitude direction
(including cos(latitude) factor).
sigMuThetaRot - The transformed standard error in the proper motion in the latitude direction.
rhoMuPhiMuThetaRot - The transformed correlation coefficient.
"""
return self.transformSkyCoordinateErrors(phi, theta, sigMuPhiStar, sigMuTheta,
rhoPhiTheta=rhoMuPhiMuTheta)
|
Transform the astrometric covariance matrix to its representation in the new coordinate system.
|
def transformCovarianceMatrix(self, phi, theta, covmat):
"""
Transform the astrometric covariance matrix to its representation in the new coordinate system.
Parameters
----------
phi - The longitude-like angle of the position of the source (radians).
theta - The latitude-like angle of the position of the source (radians).
covmat - Covariance matrix (5x5) of the astrometric parameters.
Returns
-------
covmat_rot - Covariance matrix in its representation in the new coordinate system.
"""
c, s = self._getJacobian(phi,theta)
jacobian = identity(5)
jacobian[0][0]=c
jacobian[1][1]=c
jacobian[3][3]=c
jacobian[4][4]=c
jacobian[0][1]=s
jacobian[1][0]=-s
jacobian[3][4]=s
jacobian[4][3]=-s
return dot( dot(jacobian, covmat), jacobian.T )
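A hedged usage sketch: transform a purely diagonal 5x5 covariance of (alpha*, delta, parallax, mu_alpha*, mu_delta). Again ct denotes an initialized instance of the transformation class and the numbers are illustrative:
from numpy import diag, radians
sigmas = [0.3, 0.25, 0.4, 0.5, 0.45]                 # standard errors (mas, mas, mas, mas/yr, mas/yr)
covmat = diag([s*s for s in sigmas])                  # variances on the diagonal, no correlations
covmat_rot = ct.transformCovarianceMatrix(radians(45.0), radians(10.0), covmat)
# covmat_rot is again 5x5; the position and proper-motion blocks now carry covariances.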
|
Calculates the Jacobian for the transformation of the position errors and proper motion errors between coordinate systems. This Jacobian is also the rotation matrix for the transformation of proper motions. See section 1.5.3 of the Hipparcos Explanatory Volume 1 (equation 1.5.20). This matrix has the following form:
|
def _getJacobian(self, phi, theta):
"""
Calculates the Jacobian for the transformation of the position errors and proper motion errors
between coordinate systems. This Jacobian is also the rotation matrix for the transformation of
proper motions. See section 1.5.3 of the Hipparcos Explanatory Volume 1 (equation 1.5.20). This
matrix has the following form:
| c s |
J = | |
| -s c |
Parameters
----------
phi - The longitude-like angle of the position of the source (radians).
theta - The latitude-like angle of the position of the source (radians).
Returns
-------
c, s - The Jacobian matrix elements c and s corresponding to (phi, theta) and the currently
desired coordinate system transformation.
"""
p, q, r = normalTriad(phi, theta)
# zRot = z-axis of new coordinate system expressed in terms of old system
zRot = self.rotationMatrix[2,:]
if (p.ndim == 2):
zRotAll = tile(zRot, p.shape[1]).reshape(p.shape[1],3)
pRot = cross(zRotAll, r.T)
normPRot = norm(pRot,axis=1)
for i in range(pRot.shape[0]):
pRot[i] = pRot[i]/normPRot[i]
c = zeros(pRot.shape[0])
s = zeros(pRot.shape[0])
for i in range(pRot.shape[0]):
c[i] = dot(pRot[i], p.T[i])
s[i] = dot(pRot[i], q.T[i])
return c, s
else:
pRot = cross(zRot, r.T)
pRot = pRot/norm(pRot)
return dot(pRot,p), dot(pRot,q)
|
Propagate the astrometric parameters of a source from the reference epoch t0 to the new epoch t1.
|
def propagate_astrometry(self, phi, theta, parallax, muphistar, mutheta, vrad, t0, t1):
"""
Propagate the astrometric parameters of a source from the reference epoch t0 to the new epoch t1.
Parameters
----------
phi : float
Longitude at reference epoch (radians).
theta : float
Latitude at reference epoch (radians).
parallax : float
Parallax at the reference epoch (mas).
muphistar : float
Proper motion in longitude (including cos(latitude) term) at reference epoch (mas/yr).
mutheta : float
Proper motion in latitude at reference epoch (mas/yr).
vrad : float
Radial velocity at reference epoch (km/s).
t0 : float
Reference epoch (Julian years).
t1 : float
New epoch (Julian years).
Returns
-------
Astrometric parameters, including the "radial proper motion" (NOT the radial velocity), at the new epoch.
phi1, theta1, parallax1, muphistar1, mutheta1, mur1 = propagate_astrometry(..., t0, t1)
"""
t = t1-t0
p0, q0, r0 = normalTriad(phi, theta)
# Convert input data to units of radians and Julian year. Use ICRS coordinate names internally to
# avoid errors in translating the formulae to code.
pmra0 = muphistar*self.mastorad
pmdec0 = mutheta*self.mastorad
plx0 = parallax*self.mastorad
pmr0 = vrad*parallax/auKmYearPerSec*self.mastorad
pmtot0sqr = (muphistar**2 + mutheta**2) * self.mastorad**2
# Proper motion vector
pmvec0 = pmra0*p0+pmdec0*q0
f = (1 + 2*pmr0*t + (pmtot0sqr+pmr0**2)*t**2)**(-0.5)
u = (r0*(1+pmr0*t) + pmvec0*t)*f
_, phi1, theta1 = cartesianToSpherical(u[0], u[1], u[2])
parallax1 = parallax*f
pmr1 = (pmr0+(pmtot0sqr + pmr0**2)*t)*f**2
pmvec1 = (pmvec0*(1+pmr0*t) - r0*pmr0**2*t)*f**3
p1, q1, r1 = normalTriad(phi1, theta1)
muphistar1 = sum(p1*pmvec1/self.mastorad, axis=0)
mutheta1 = sum(q1*pmvec1/self.mastorad, axis =0)
murad1 = pmr1/self.mastorad
return phi1, theta1, parallax1, muphistar1, mutheta1, murad1
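A hedged usage sketch; ep stands for an initialized instance of the epoch-propagation class and the source parameters are illustrative only:
from numpy import radians
phi0, theta0 = radians(30.0), radians(10.0)   # position at t0 (radians)
plx0 = 20.0                                   # parallax (mas)
pmra0, pmdec0 = 150.0, -80.0                  # proper motions (mas/yr)
vrad0 = 25.0                                  # radial velocity (km/s)
phi1, theta1, plx1, pmra1, pmdec1, pmr1 = ep.propagate_astrometry(
    phi0, theta0, plx0, pmra0, pmdec0, vrad0, 2015.5, 2025.5)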
|
Propagate the position of a source from the reference epoch t0 to the new epoch t1.
|
def propagate_pos(self, phi, theta, parallax, muphistar, mutheta, vrad, t0, t1):
"""
Propagate the position of a source from the reference epoch t0 to the new epoch t1.
Parameters
----------
phi : float
Longitude at reference epoch (radians).
theta : float
Latitude at reference epoch (radians).
parallax : float
Parallax at the reference epoch (mas).
muphistar : float
Proper motion in longitude (including cos(latitude) term) at reference epoch (mas/yr).
mutheta : float
Proper motion in latitude at reference epoch (mas/yr).
vrad : float
Radial velocity at reference epoch (km/s).
t0 : float
Reference epoch (Julian years).
t1 : float
New epoch (Julian years).
Returns
-------
Coordinates phi and theta at new epoch (in radians)
"""
phi1, theta1, parallax1, muphistar1, mutheta1, pmr1 = self.propagate_astrometry(phi, theta, parallax, muphistar, mutheta, vrad, t0, t1)
return phi1, theta1
|
Propagate the astrometric parameters of a source, together with their covariance matrix and radial proper motion, from epoch t0 to epoch t1.
|
def propagate_astrometry_and_covariance_matrix(self, a0, c0, t0, t1):
"""
Propagate the astrometric parameters of a source, together with their covariance matrix and radial proper
motion, from epoch t0 to epoch t1.
Code based on the Hipparcos Fortran implementation by Lennart Lindegren.
Parameters
----------
a0 : array_like
6-element vector: (phi, theta, parallax, muphistar, mutheta, vrad) in units of (radians,
radians, mas, mas/yr, mas/yr, km/s). Shape of a should be (6,) or (6,N), with N the number of
sources for which the astrometric parameters are provided.
c0 : array_like
Covariance matrix stored in a 6x6 element array. This can be constructed from the columns
listed in the Gaia catalogue. The units are [mas^2, mas^2/yr, mas^2/yr^2] for the various
elements. Note that the elements in the 6th row and column should be:
c[6,i]=c[i,6]=c[i,3]*vrad/auKmYearPerSec for i=1,..,5 and
c[6,6]=c[3,3]*(vrad^2+vrad_error^2)/auKmYearPerSec^2+(parallax*vrad_error/auKmYearPerSec)^2
Shape of c0 should be (6,6) or (N,6,6).
t0 : float
Reference epoch (Julian years).
t1 : float
New epoch (Julian years).
Returns
-------
Astrometric parameters, including the "radial proper motion" (NOT the radial velocity), and
covariance matrix at the new epoch as a 2D matrix with the new variances on the diagonal and the
covariance in the off-diagonal elements.
"""
zero, one, two, three = 0, 1, 2, 3
tau = t1-t0
# Calculate the normal triad [p0 q0 r0] at t0
p0, q0, r0 = normalTriad(a0[0], a0[1])
# Convert to internal units (radians, Julian year)
par0 = a0[2]*self.mastorad
pma0 = a0[3]*self.mastorad
pmd0 = a0[4]*self.mastorad
pmr0 = a0[5]*a0[2]/auKmYearPerSec*self.mastorad
# Proper motion vector
pmvec0 = pma0*p0+pmd0*q0
# Auxiliary quantities
tau2 = tau*tau
pm02 = pma0**2 + pmd0**2
w = one + pmr0*tau
f2 = one/(one + two*pmr0*tau + (pm02+pmr0**2)*tau2)
f = sqrt(f2)
f3 = f2*f
f4 = f2*f2
# Position vector and parallax at t1
u = (r0*w + pmvec0*tau)*f
_, ra, dec = cartesianToSpherical(u[0], u[1], u[2])
par = par0*f
# Proper motion vector and radial proper motion at t1
pmvec = (pmvec0*(one+pmr0*tau) - r0*pmr0**2*tau)*f3
pmr = (pmr0+(pm02 + pmr0**2)*tau)*f2
# Normal triad at t1
p, q, r = normalTriad(ra, dec)
# Convert parameters at t1 to external units (mas, Julian year)
pma = sum(p*pmvec, axis=0)
pmd = sum(q*pmvec, axis =0)
a = zeros_like(a0)
a[0] = ra
a[1] = dec
a[2] = par/self.mastorad
a[3] = pma/self.mastorad
a[4] = pmd/self.mastorad
a[5] = pmr/self.mastorad
# Auxiliary quantities for the partial derivatives
pmz = pmvec0*f - three*pmvec*w
pp0 = sum(p*p0, axis=0)
pq0 = sum(p*q0, axis=0)
pr0 = sum(p*r0, axis=0)
qp0 = sum(q*p0, axis=0)
qq0 = sum(q*q0, axis=0)
qr0 = sum(q*r0, axis=0)
ppmz = sum(p*pmz, axis=0)
qpmz = sum(q*pmz, axis=0)
J = zeros_like(c0)
if (c0.ndim==2):
J = J[newaxis,:,:]
# Partial derivatives
J[:,0,0] = pp0*w*f - pr0*pma0*tau*f
J[:,0,1] = pq0*w*f - pr0*pmd0*tau*f
J[:,0,2] = zero
J[:,0,3] = pp0*tau*f
J[:,0,4] = pq0*tau*f
J[:,0,5] = -pma*tau2
J[:,1,0] = qp0*w*f - qr0*pma0*tau*f
J[:,1,1] = qq0*w*f - qr0*pmd0*tau*f
J[:,1,2] = zero
J[:,1,3] = qp0*tau*f
J[:,1,4] = qq0*tau*f
J[:,1,5] = -pmd*tau2
J[:,2,0] = zero
J[:,2,1] = zero
J[:,2,2] = f
J[:,2,3] = -par*pma0*tau2*f2
J[:,2,4] = -par*pmd0*tau2*f2
J[:,2,5] = -par*w*tau*f2
J[:,3,0] = -pp0*pm02*tau*f3 - pr0*pma0*w*f3
J[:,3,1] = -pq0*pm02*tau*f3 - pr0*pmd0*w*f3
J[:,3,2] = zero
J[:,3,3] = pp0*w*f3 - two*pr0*pma0*tau*f3 - three*pma*pma0*tau2*f2
J[:,3,4] = pq0*w*f3 - two*pr0*pmd0*tau*f3 - three*pma*pmd0*tau2*f2
J[:,3,5] = ppmz*tau*f2
J[:,4,0] = -qp0*pm02*tau*f3 - qr0*pma0*w*f3
J[:,4,1] = -qq0*pm02*tau*f3 - qr0*pmd0*w*f3
J[:,4,2] = zero
J[:,4,3] = qp0*w*f3 - two*qr0*pma0*tau*f3 - three*pmd*pma0*tau2*f2
J[:,4,4] = qq0*w*f3 - two*qr0*pmd0*tau*f3 - three*pmd*pmd0*tau2*f2
J[:,4,5] = qpmz*tau*f2
J[:,5,0] = zero
J[:,5,1] = zero
J[:,5,2] = zero
J[:,5,3] = two*pma0*w*tau*f4
J[:,5,4] = two*pmd0*w*tau*f4
J[:,5,5] = (w**2 - pm02*tau2)*f4
JT = zeros_like(J)
for i in range(J.shape[0]):
JT[i] = J[i].T
if (c0.ndim==2):
c = matmul(J,matmul(c0[newaxis,:,:],JT))
else:
c = matmul(J,matmul(c0,JT))
return a, squeeze(c)
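A hedged usage sketch that builds a0 and c0 following the prescription in the docstring (the sixth row and column are derived from the parallax terms), then propagates by ten years; ep is an initialized instance of the class and auKmYearPerSec is the same constant used in the code above:
import numpy as np
a0 = np.array([np.radians(30.0), np.radians(10.0), 20.0, 150.0, -80.0, 25.0])
sig = np.array([0.3, 0.25, 0.4, 0.5, 0.45])     # illustrative errors (mas, mas, mas, mas/yr, mas/yr)
vrad, vrad_error = a0[5], 1.0                   # km/s
c0 = np.zeros((6, 6))
c0[:5, :5] = np.diag(sig**2)
c0[5, :5] = c0[:5, 2]*vrad/auKmYearPerSec       # c[6,i] = c[i,6] = c[i,3]*vrad/A
c0[:5, 5] = c0[5, :5]
c0[5, 5] = (c0[2, 2]*(vrad**2 + vrad_error**2)/auKmYearPerSec**2
            + (a0[2]*vrad_error/auKmYearPerSec)**2)
a1, c1 = ep.propagate_astrometry_and_covariance_matrix(a0, c0, 2016.0, 2026.0)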
|
Make the plot with radial velocity accuracy horizons. The plot shows V-band magnitude vs distance for a number of spectral types over the range 5.7<G_RVS<16.1. In addition a set of crudely drawn contours shows the points where end-of-mission radial velocity errors of 2, 5, and 10 km/s are reached.
|
def makePlot(args):
"""
Make the plot with radial velocity accuracy horizons. The plot shows V-band magnitude vs distance for a
number of spectral types over the range 5.7<G_RVS<16.1. In addition a set of crudely drawn contours shows
the points where end-of-mission radial velocity errors of 2, 5, and 10 km/s are reached.
Parameters
----------
args - Command line arguments.
"""
distances = 10.0**np.linspace(1,6,10001)
spts=['B0V', 'A0V', 'F0V', 'G0V', 'K0V', 'K4V', 'K1III']
twokmsRV = []
twokmsV = []
vabsTwokms = []
fivekmsRV = []
fivekmsV = []
vabsFivekms = []
tenkmsRV = []
tenkmsV = []
vabsTenkms = []
fig=plt.figure(figsize=(11,7.8))
deltaHue = 240.0/(len(spts)-1)
hues = (240.0-np.arange(len(spts))*deltaHue)/360.0
hsv=np.zeros((1,1,3))
hsv[0,0,1]=1.0
hsv[0,0,2]=0.9
for hue,spt in zip(hues, spts):
hsv[0,0,0]=hue
vmags = vabsFromSpt(spt)+5.0*np.log10(distances)-5.0
vmini=vminiFromSpt(spt)
grvsmags = vmags - vminGrvsFromVmini(vmini)
rvError = vradErrorSkyAvg(vmags, spt)
observed = (grvsmags>=5.7) & (grvsmags<=16.1)
rvError = rvError[observed]
# Identify the points where the radial velocity error is 2, 5, or 10 km/s.
if (rvError.min()<=2.0):
index = len(rvError[rvError<=2.0])-1
twokmsRV.append(distances[observed][index])
twokmsV.append(vmags[observed][index])
vabsTwokms.append(vabsFromSpt(spt))
if (rvError.min()<=5.0):
index = len(rvError[rvError<=5.0])-1
fivekmsRV.append(distances[observed][index])
fivekmsV.append(vmags[observed][index])
vabsFivekms.append(vabsFromSpt(spt))
if (rvError.min()<=10.0):
index = len(rvError[rvError<=10.0])-1
tenkmsRV.append(distances[observed][index])
tenkmsV.append(vmags[observed][index])
vabsTenkms.append(vabsFromSpt(spt))
plt.semilogx(distances[observed], vmags[observed], '-', label=spt, color=hsv_to_rgb(hsv)[0,0,:])
plt.text(distances[observed][-1], vmags[observed][-1], spt, horizontalalignment='center',
verticalalignment='bottom', fontsize=14)
# Draw the "contours" of constant radial velocity accuracy.
twokmsRV = np.array(twokmsRV)
twokmsV = np.array(twokmsV)
indices = np.argsort(vabsTwokms)
plt.semilogx(twokmsRV[indices],twokmsV[indices],'k--')
plt.text(twokmsRV[indices][-1]*0.8,twokmsV[indices][-1],"$2$ km s$^{-1}$", ha='right', size=16,
bbox=dict(boxstyle="round, pad=0.3", ec=(0.0, 0.0, 0.0), fc=(1.0, 1.0, 1.0),))
fivekmsRV = np.array(fivekmsRV)
fivekmsV = np.array(fivekmsV)
indices = np.argsort(vabsFivekms)
plt.semilogx(fivekmsRV[indices],fivekmsV[indices],'k--')
plt.text(fivekmsRV[indices][-1]*0.8,fivekmsV[indices][-1],"$5$ km s$^{-1}$", ha='right', size=16,
bbox=dict(boxstyle="round, pad=0.3", ec=(0.0, 0.0, 0.0), fc=(1.0, 1.0, 1.0),))
tenkmsRV = np.array(tenkmsRV)
tenkmsV = np.array(tenkmsV)
indices = np.argsort(vabsTenkms)
plt.semilogx(tenkmsRV[indices],tenkmsV[indices],'k--')
plt.text(tenkmsRV[indices][-1]*0.8,tenkmsV[indices][-1]+0.5,"$10$ km s$^{-1}$", ha='right', size=16,
bbox=dict(boxstyle="round, pad=0.3", ec=(0.0, 0.0, 0.0), fc=(1.0, 1.0, 1.0),))
plt.title('Radial velocity accuracy horizons ($A_V=0$)')
plt.xlabel('Distance [pc]')
plt.ylabel('V')
plt.grid()
#leg=plt.legend(loc=4, fontsize=14, labelspacing=0.5)
plt.ylim(5,20)
basename='RadialVelocityHorizons'
if (args['pdfOutput']):
plt.savefig(basename+'.pdf')
elif (args['pngOutput']):
plt.savefig(basename+'.png')
else:
plt.show()
|
Look up the numerical factors to apply to the sky averaged parallax error in order to obtain error values for a given astrometric parameter taking the Ecliptic latitude and the number of transits into account.
|
def errorScalingFactor(observable, beta):
"""
Look up the numerical factors to apply to the sky averaged parallax error in order to obtain error
values for a given astrometric parameter, taking the Ecliptic latitude and the number of transits into
account.
Parameters
----------
observable - Name of astrometric observable (one of: alphaStar, delta, parallax, muAlphaStar, muDelta)
beta - Values(s) of the Ecliptic latitude.
Returns
-------
Numerical factors to apply to the errors of the given observable.
"""
if isscalar(beta):
index=int(floor(abs(sin(beta))*_numStepsSinBeta))
if index == _numStepsSinBeta:
return _astrometricErrorFactors[observable][_numStepsSinBeta-1]
else:
return _astrometricErrorFactors[observable][index]
else:
indices = array(floor(abs(sin(beta))*_numStepsSinBeta), dtype=int)
indices[(indices==_numStepsSinBeta)] = _numStepsSinBeta-1
return _astrometricErrorFactors[observable][indices]
|
Calculate the sky averaged parallax error from G and (V-I).
|
def parallaxErrorSkyAvg(G, vmini, extension=0.0):
"""
Calculate the sky averaged parallax error from G and (V-I).
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The parallax error in micro-arcseconds.
"""
factor = errorScalingMissionLength(extension, -0.5)
z=calcZ(G)
return sqrt(-1.631 + 680.766*z + 32.732*z*z)*(0.986 + (1.0 - 0.986)*vmini)*factor
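A hedged worked example: at G=15 the gate parameter z equals 1, so for (V-I)=0.75 the expression above evaluates to sqrt(-1.631 + 680.766 + 32.732)*(0.986 + 0.014*0.75), i.e. roughly 27 micro-arcseconds for the nominal mission length:
import numpy as np
sigma_plx = parallaxErrorSkyAvg(15.0, 0.75)                              # ~27 muas
sigma_plx_arr = parallaxErrorSkyAvg(np.array([15.0, 18.0, 20.0]), 0.75)  # array input also works
sigma_plx_ext = parallaxErrorSkyAvg(15.0, 0.75, extension=2.0)           # two-year mission extension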
|
Calculate the minimum parallax error from G and (V-I). This corresponds to the sky regions with the smallest astrometric errors. At the bright end the parallax error is at least 14 muas due to the gating scheme.
|
def parallaxMinError(G, vmini, extension=0.0):
"""
Calculate the minimum parallax error from G and (V-I). This corresponds to the sky regions with the
smallest astrometric errors. At the bright end the parallax error is at least 14 muas due to the
gating scheme.
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The minimum parallax error in micro-arcseconds.
"""
return _astrometricErrorFactors["parallax"].min()*parallaxErrorSkyAvg(G, vmini, extension=extension)
|
Calculate the maximum parallax error from G and (V-I). This corresponds to the sky regions with the largest astrometric errors. At the bright end the parallax error is at least 14 muas due to the gating scheme.
|
def parallaxMaxError(G, vmini, extension=0.0):
"""
Calculate the maximum parallax error from G and (V-I). This corresponds to the sky regions with the
largest astrometric errors. At the bright end the parallax error is at least 14 muas due to the
gating scheme.
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The maximum parallax error in micro-arcseconds.
"""
errors = _astrometricErrorFactors["parallax"].max()*parallaxErrorSkyAvg(G, vmini, extension=extension)
indices = (errors<_parallaxErrorMaxBright)
errors[indices]=_parallaxErrorMaxBright
return errors
|
Calculate the sky averaged parallax error from G and (V-I). In this case assume gating starts at G=13.3 (to simulate bright-star worst performance).
|
def parallaxErrorSkyAvgAltStartGate(G, vmini, extension=0.0):
"""
Calculate the sky averaged parallax error from G and (V-I). In this case assume gating starts at G=13.3
(to simulate bright star worst performance)
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The parallax error in micro-arcseconds.
"""
factor = errorScalingMissionLength(extension, -0.5)
z=calcZAltStartGate(G)
return sqrt(-1.631 + 680.766*z + 32.732*z*z)*(0.986 + (1.0 - 0.986)*vmini)*factor
|
Calculate the sky averaged position errors from G and (V-I).
|
def positionErrorSkyAvg(G, vmini, extension=0.0):
"""
Calculate the sky averaged position errors from G and (V-I).
NOTE! THE ERRORS ARE FOR SKY POSITIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE YOUR
SIMULATED ASTROMETRY IS ALSO ON THE ICRS.
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The error in alpha* and the error in delta, in that order, in micro-arcsecond.
"""
parallaxError = parallaxErrorSkyAvg(G, vmini, extension=extension)
return _scalingForPositions['AlphaStar']*parallaxError, \
_scalingForPositions['Delta']*parallaxError
|
Calculate the minimum position errors from G and (V-I). These correspond to the sky regions with the smallest astrometric errors.
|
def positionMinError(G, vmini, extension=0.0):
"""
Calculate the minimum position errors from G and (V-I). These correspond to the sky regions with the
smallest astrometric errors.
NOTE! THE ERRORS ARE FOR SKY POSITIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE YOUR
SIMULATED ASTROMETRY IS ALSO ON THE ICRS.
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The minimum error in alpha* and the error in delta, in that order, in micro-arcsecond.
"""
parallaxError = parallaxErrorSkyAvg(G, vmini, extension=extension)
return _astrometricErrorFactors['alphaStar'].min()*parallaxError, \
_astrometricErrorFactors['delta'].min()*parallaxError
|
Calculate the maximum position errors from G and (V-I). These correspond to the sky regions with the largest astrometric errors.
|
def positionMaxError(G, vmini, extension=0.0):
"""
Calculate the maximum position errors from G and (V-I). These correspond to the sky regions with the
largest astrometric errors.
NOTE! THE ERRORS ARE FOR SKY POSITIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE YOUR
SIMULATED ASTROMETRY IS ALSO ON THE ICRS.
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The maximum error in alpha* and the error in delta, in that order, in micro-arcsecond.
"""
parallaxError = parallaxErrorSkyAvg(G, vmini, extension)
return _astrometricErrorFactors['alphaStar'].max()*parallaxError, \
_astrometricErrorFactors['delta'].max()*parallaxError
|
Calculate the position errors from G and (V-I) and the Ecliptic latitude beta of the source.
|
def positionError(G, vmini, beta, extension=0.0):
"""
Calculate the position errors from G and (V-I) and the Ecliptic latitude beta of the source.
NOTE! THE ERRORS ARE FOR SKY POSITIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE YOUR
SIMULATED ASTROMETRY IS ALSO ON THE ICRS.
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
beta - Value(s) of the Ecliptic latitude.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The error in alpha* and the error in delta, in that order, in micro-arcsecond.
"""
parallaxError = parallaxErrorSkyAvg(G, vmini, extension=extension)
return errorScalingFactor('alphaStar',beta)*parallaxError, \
errorScalingFactor('delta',beta)*parallaxError
|
Calculate the minimum proper motion errors from G and (V-I). These correspond to the sky regions with the smallest astrometric errors.
|
def properMotionMinError(G, vmini, extension=0.0):
"""
Calculate the minimum proper motion errors from G and (V-I). These correspond to the sky regions with
the smallest astrometric errors.
NOTE! THE ERRORS ARE FOR PROPER MOTIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE
YOUR SIMULATED ASTROMETRY IS ALSO ON THE ICRS.
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The minimum error in mu_alpha* and the error in mu_delta, in that order, in micro-arcsecond/year.
"""
factor = errorScalingMissionLength(extension, -1.5)
parallaxError = parallaxErrorSkyAvg(G, vmini)*factor
return _astrometricErrorFactors['muAlphaStar'].min()*parallaxError, \
_astrometricErrorFactors['muDelta'].min()*parallaxError
|
Calculate the maximum proper motion errors from G and (V-I). These correspond to the sky regions with the largest astrometric errors.
|
def properMotionMaxError(G, vmini, extension=0.0):
"""
Calculate the maximum proper motion errors from G and (V-I). These correspond to the sky regions with
the largest astrometric errors.
NOTE! THE ERRORS ARE FOR PROPER MOTIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE
YOUR SIMULATED ASTROMETRY IS ALSO ON THE ICRS.
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The maximum error in mu_alpha* and the error in mu_delta, in that order, in micro-arcsecond/year.
"""
factor = errorScalingMissionLength(extension, -1.5)
parallaxError = parallaxErrorSkyAvg(G, vmini)*factor
indices = (parallaxError<_parallaxErrorMaxBright)
parallaxError[indices] = _parallaxErrorMaxBright
return _astrometricErrorFactors['muAlphaStar'].max()*parallaxError, \
_astrometricErrorFactors['muDelta'].max()*parallaxError
|
Calculate the proper motion errors from G and (V-I) and the Ecliptic latitude beta of the source.
|
def properMotionError(G, vmini, beta, extension=0.0):
"""
Calculate the proper motion errors from G and (V-I) and the Ecliptic latitude beta of the source.
NOTE! THE ERRORS ARE FOR PROPER MOTIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE
YOUR SIMULATED ASTROMETRY IS ALSO ON THE ICRS.
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
beta - Value(s) of the Ecliptic latitude.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The error in mu_alpha* and the error in mu_delta, in that order, in micro-arcsecond/year.
"""
factor = errorScalingMissionLength(extension, -1.5)
parallaxError = parallaxErrorSkyAvg(G, vmini)*factor
return errorScalingFactor('muAlphaStar',beta)*parallaxError, \
errorScalingFactor('muDelta',beta)*parallaxError
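A hedged usage sketch; beta is the Ecliptic latitude in radians, and the numbers are illustrative:
from numpy import radians
sig_pmra, sig_pmdec = properMotionError(16.0, 1.0, radians(45.0))
# Returns the errors in mu_alpha* and mu_delta (micro-arcsec/yr) scaled for beta = 45 deg.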
|
Calculate the sky averaged total proper motion error from G and (V-I). This refers to the error on the length of the proper motion vector.
|
def totalProperMotionErrorSkyAvg(G, vmini, extension=0.0):
"""
Calculate the sky averaged total proper motion error from G and (V-I). This refers to the error on the
length of the proper motion vector.
NOTE! THE ERRORS ARE FOR PROPER MOTIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE
YOUR SIMULATED ASTROMETRY IS ALSO ON THE ICRS.
Parameters
----------
G - Value(s) of G-band magnitude.
vmini - Value(s) of (V-I) colour.
Keywords
--------
extension - Add this amount of years to the mission lifetime and scale the errors accordingly.
Returns
-------
The error on the total proper motion in micro-arcsecond/year.
"""
factor = errorScalingMissionLength(extension, -1.5)
parallaxError = parallaxErrorSkyAvg(G, vmini)*factor
return _scalingForProperMotions['Total']*parallaxError
|
Make the plot with parallax horizons. The plot shows V-band magnitude vs distance for a number of spectral types over the range 5.7<G<20. In addition a set of crudely drawn contours shows the points where a relative parallax accuracy of 0.1, 1, and 10 per cent is reached.
|
def makePlot(args):
"""
Make the plot with parallax horizons. The plot shows V-band magnitude vs distance for a number of
spectral types and over the range 5.7<G<20. In addition a set of crudely drawn contours show the points
where a relative parallax accuracy of 0.1, 1, and 10 per cent is reached.
Parameters
----------
args - Command line arguments.
"""
distances = 10.0**np.linspace(1,6,10001)
av = args['extinction']
ai = 0.479*av #Cardelli et al R=3.1
spts = ['B0I', 'B1V', 'G2V', 'K4V', 'M0V', 'M6V', 'K1III', 'M0III']
pointOnePercD = []
pointOnePercV = []
onePercD = []
onePercV = []
tenPercD = []
tenPercV = []
vabsPointOnePerc = []
vabsOnePerc = []
vabsTenPerc = []
fig=plt.figure(figsize=(11,7.8))
deltaHue = 240.0/(len(spts)-1)
hues = (240.0-np.arange(len(spts))*deltaHue)/360.0
hsv=np.zeros((1,1,3))
hsv[0,0,1]=1.0
hsv[0,0,2]=0.9
for hue,spt in zip(hues, spts):
hsv[0,0,0]=hue
vmags = vabsFromSpt(spt)+5.0*np.log10(distances)-5.0+av
vmini=vminiFromSpt(spt)+av-ai
#gmags = gabsFromSpt(spt)+5.0*np.log10(distances)-5.0
gmags = vmags + gminvFromVmini(vmini)
relParErr = parallaxErrorSkyAvg(gmags,vmini)*distances/1.0e6
observed = (gmags>=5.7) & (gmags<=20.0)
relParErrObs = relParErr[observed]
# Identify the points where the relative parallax accuracy is 0.1, 1, or 10 per cent.
if (relParErrObs.min()<0.001):
index = len(relParErrObs[relParErrObs<=0.001])-1
pointOnePercD.append(distances[observed][index])
pointOnePercV.append(vmags[observed][index])
vabsPointOnePerc.append(vabsFromSpt(spt))
if (relParErrObs.min()<0.01):
index = len(relParErrObs[relParErrObs<=0.01])-1
onePercD.append(distances[observed][index])
onePercV.append(vmags[observed][index])
vabsOnePerc.append(vabsFromSpt(spt))
if (relParErrObs.min()<0.1):
index = len(relParErrObs[relParErrObs<=0.1])-1
tenPercD.append(distances[observed][index])
tenPercV.append(vmags[observed][index])
vabsTenPerc.append(vabsFromSpt(spt))
plt.semilogx(distances[observed], vmags[observed], '-', label=spt, color=hsv_to_rgb(hsv)[0,0,:])
if (spt=='B0I'):
plt.text(distances[observed][-1]-1.0e5, vmags[observed][-1], spt, horizontalalignment='right',
verticalalignment='bottom', fontsize=14)
else:
plt.text(distances[observed][-1], vmags[observed][-1], spt, horizontalalignment='center',
verticalalignment='bottom', fontsize=14)
# Draw the "contours" of constant relative parallax accuracy.
pointOnePercD = np.array(pointOnePercD)
pointOnePercV = np.array(pointOnePercV)
indices = np.argsort(vabsPointOnePerc)
plt.semilogx(pointOnePercD[indices],pointOnePercV[indices],'k--')
plt.text(pointOnePercD[indices][-1]*1.2,pointOnePercV[indices][-1]-2.5,"$0.1$\\%", ha='right', size=16,
bbox=dict(boxstyle="round, pad=0.3", ec=(0.0, 0.0, 0.0), fc=(1.0, 1.0, 1.0),))
onePercD = np.array(onePercD)
onePercV = np.array(onePercV)
indices = np.argsort(vabsOnePerc)
plt.semilogx(onePercD[indices],onePercV[indices],'k--')
plt.text(onePercD[indices][-1]*1.2,onePercV[indices][-1]-2.5,"$1$\\%", ha='right', size=16,
bbox=dict(boxstyle="round, pad=0.3", ec=(0.0, 0.0, 0.0), fc=(1.0, 1.0, 1.0),))
tenPercD = np.array(tenPercD)
tenPercV = np.array(tenPercV)
indices = np.argsort(vabsTenPerc)
plt.semilogx(tenPercD[indices],tenPercV[indices],'k--')
plt.text(tenPercD[indices][-1]*1.5,tenPercV[indices][-1]-2.5,"$10$\\%", ha='right', size=16,
bbox=dict(boxstyle="round, pad=0.3", ec=(0.0, 0.0, 0.0), fc=(1.0, 1.0, 1.0),))
plt.title('Parallax relative accuracy horizons ($A_V={0}$)'.format(av))
plt.xlabel('Distance [pc]')
plt.ylabel('V')
plt.grid()
#leg=plt.legend(loc=4, fontsize=14, labelspacing=0.5)
plt.ylim(5,26)
basename='ParallaxHorizons'
if (args['pdfOutput']):
plt.savefig(basename+'.pdf')
elif (args['pngOutput']):
plt.savefig(basename+'.png')
else:
plt.show()
|
Obtain (V-I) for the input spectral type.
|
def vminiFromSpt(spt):
"""
Obtain (V-I) for the input spectral type.
Parameters
----------
spt - String representing the spectral type of the star.
Returns
-------
The value of (V-I).
"""
if spt in _sptToVminiVabsDictionary:
return _sptToVminiVabsDictionary[spt][0]
else:
message="Unknown spectral type. Allowed values are: "
for key in _sptToVminiVabsDictionary.keys():
message += key+" "
raise Exception(message)
|
Obtain M_V (absolute magnitude in the V-band) for the input spectral type.
|
def vabsFromSpt(spt):
"""
Obtain M_V (absolute magnitude in V-band) for the input spectral type.
Parameters
----------
spt - String representing the spectral type of the star.
Returns
-------
The value of M_V.
"""
if spt in _sptToVminiVabsDictionary:
return _sptToVminiVabsDictionary[spt][1]
else:
message="Unknown spectral type. Allowed values are: "
for key in _sptToVminiVabsDictionary.keys():
message += key+" "
raise Exception(message)
|
Obtain M_G (absolute magnitude in the G-band) for the input spectral type.
|
def gabsFromSpt(spt):
"""
Obtain M_G (absolute magnitude in G-band) for the input spectral type.
Parameters
----------
spt - String representing the spectral type of the star.
Returns
-------
The value of M_G.
"""
if spt in _sptToVminiVabsDictionary:
return vabsFromSpt(spt) + gminvFromVmini(vminiFromSpt(spt))
else:
message="Unknown spectral type. Allowed values are: "
for key in _sptToVminiVabsDictionary.keys():
message += key+" "
raise Exception(message)
|
Plot relative parallax errors as a function of distance for stars of a given spectral type.
|
def makePlot(args):
"""
Plot relative parallax errors as a function of distance for stars of a given spectral type.
Parameters
----------
args - command line arguments
"""
logdistancekpc = np.linspace(-1,np.log10(20.0),100)
sptVabsAndVmini=OrderedDict([('K0V',(5.58,0.87)), ('G5V',(4.78,0.74)), ('G0V',(4.24,0.67)),
('F5V',(3.50,0.50)), ('F0V',(2.98,0.38)), ('RC',(0.8,1.0))])
lines={}
fig=plt.figure(figsize=(10,6.5))
currentAxis=plt.gca()
for spt in sptVabsAndVmini.keys():
vmag=sptVabsAndVmini[spt][0]+5.0*logdistancekpc+10.0
indices=(vmag>14) & (vmag<16)
gmag=vmag+gminvFromVmini(sptVabsAndVmini[spt][1])
parerrors=parallaxErrorSkyAvg(gmag,sptVabsAndVmini[spt][1])
relparerrors=parerrors*10**logdistancekpc/1000.0
plt.loglog(10**logdistancekpc, relparerrors,'--k',lw=1)
plt.loglog(10**logdistancekpc[indices], relparerrors[indices],'-',label=spt)
plt.xlim(0.1,20.0)
plt.ylim(0.001,0.5)
plt.text(0.9, 0.05,'Colours indicate $14<V<16$',
horizontalalignment='right',
verticalalignment='bottom',
transform = currentAxis.transAxes)
plt.legend(loc=2)
plt.xlabel('distance [kpc]')
plt.ylabel('$\\sigma_\\varpi/\\varpi$')
plt.grid(which='both')
if (args['pdfOutput']):
plt.savefig('RelativeParallaxErrorsVsDist.pdf')
elif (args['pngOutput']):
plt.savefig('RelativeParallaxErrorsVsDist.png')
else:
plt.show()
|
Make the plot with radial velocity performance predictions.
|
def makePlot(args):
"""
Make the plot with radial velocity performance predictions.
:argument args: command line arguments
"""
gRvs=np.linspace(5.7,16.1,101)
spts=['B0V', 'B5V', 'A0V', 'A5V', 'F0V', 'G0V',
'G5V', 'K0V', 'K1IIIMP', 'K4V', 'K1III']
fig=plt.figure(figsize=(10,6.5))
deltaHue = 240.0/(len(spts)-1)
hsv=np.zeros((1,1,3))
hsv[0,0,1]=1.0
hsv[0,0,2]=0.9
count=0
for spt in spts:
hsv[0,0,0]=(240-count*deltaHue)/360.0
vmag = vminGrvsFromVmini(vminiFromSpt(spt)) + gRvs
vradErrors = vradErrorSkyAvg(vmag, spt)
plt.plot(vmag, vradErrors, '-', label=spt, color=hsv_to_rgb(hsv)[0,0,:])
count+=1
plt.grid(which='both')
plt.xlim(9,17.5)
plt.ylim(0,20)
plt.xticks(np.arange(9,18,1))
plt.yticks(np.arange(0,20.5,5))
plt.xlabel('$V$ [mag]')
plt.ylabel('End-of-mission radial velocity error [km s$^{-1}$]')
leg=plt.legend(loc=0, handlelength=2.0, labelspacing=0.10)
for t in leg.get_texts():
t.set_fontsize(12)
if (args['pdfOutput']):
plt.savefig('RadialVelocityErrors.pdf')
elif (args['pngOutput']):
plt.savefig('RadialVelocityErrors.png')
else:
plt.show()
|
A utility function for selecting the first non-null query.
|
def either(*funcs):
"""
A utility function for selecting the first non-null query.
Parameters:
funcs: One or more functions
Returns:
A function that, when called with a :class:`Node`, will
pass the input to each `func`, and return the first non-Falsey
result.
Examples:
>>> s = Soupy("<p>hi</p>")
>>> s.apply(either(Q.find('a'), Q.find('p').text))
Scalar('hi')
"""
def either(val):
for func in funcs:
result = val.apply(func)
if result:
return result
return Null()
return either
|
Decorator for eval_ that prints a helpful error message if an exception is generated in a Q expression
|
def _helpful_failure(method):
"""
Decorator for eval_ that prints a helpful error message
if an exception is generated in a Q expression
"""
@wraps(method)
def wrapper(self, val):
try:
return method(self, val)
except:
exc_cls, inst, tb = sys.exc_info()
if hasattr(inst, '_RERAISE'):
_, expr, _, inner_val = Q.__debug_info__
Q.__debug_info__ = QDebug(self, expr, val, inner_val)
raise
if issubclass(exc_cls, KeyError): # Overrides formatting
exc_cls = QKeyError
# Show val, unless it's too long
prettyval = repr(val)
if len(prettyval) > 150:
prettyval = "<%s instance>" % (type(val).__name__)
msg = "{0}\n\n\tEncountered when evaluating {1}{2}".format(
inst, prettyval, self)
new_exc = exc_cls(msg)
new_exc._RERAISE = True
Q.__debug_info__ = QDebug(self, self, val, val)
six.reraise(exc_cls, new_exc, tb)
return wrapper
|
Convert to unicode and add quotes if initially a string
|
def _uniquote(value):
"""
Convert to unicode, and add quotes if initially a string
"""
if isinstance(value, six.binary_type):
try:
value = value.decode('utf-8')
except UnicodeDecodeError: # Not utf-8. Show the repr
value = six.text_type(_dequote(repr(value))) # trim quotes
result = six.text_type(value)
if isinstance(value, six.text_type):
result = "'%s'" % result
return result
|
Call func on each element in the collection.
|
def each(self, *funcs):
"""
Call `func` on each element in the collection.
If multiple functions are provided, each item
in the output will be a tuple of each
func(item) in self.
Returns a new Collection.
Example:
>>> col = Collection([Scalar(1), Scalar(2)])
>>> col.each(Q * 10)
Collection([Scalar(10), Scalar(20)])
>>> col.each(Q * 10, Q - 1)
Collection([Scalar((10, 0)), Scalar((20, 1))])
"""
funcs = list(map(_make_callable, funcs))
if len(funcs) == 1:
return Collection(map(funcs[0], self._items))
tupler = lambda item: Scalar(
tuple(_unwrap(func(item)) for func in funcs))
return Collection(map(tupler, self._items))
|
Return a new Collection excluding some items
|
def exclude(self, func=None):
"""
Return a new Collection excluding some items
Parameters:
func : function(Node) -> Scalar
A function that, when called on each item
in the collection, returns a boolean-like
value. If no function is provided, then
truthy items will be removed.
Returns:
A new Collection consisting of the items
where bool(func(item)) == False
"""
func = _make_callable(func)
inverse = lambda x: not func(x)
return self.filter(inverse)
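A hedged usage sketch, mirroring the filter example in the next entry (node and Q are assumed to be in scope as in the other Soupy examples):
node.find_all('a').exclude(Q['href'].startswith('http'))   # keep only non-http (e.g. relative) links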
|
Return a new Collection with some items removed.
|
def filter(self, func=None):
"""
Return a new Collection with some items removed.
Parameters:
func : function(Node) -> Scalar
A function that, when called on each item
in the collection, returns a boolean-like
value. If no function is provided, then
false-y items will be removed.
Returns:
A new Collection consisting of the items
where bool(func(item)) == True
Examples:
node.find_all('a').filter(Q['href'].startswith('http'))
"""
func = _make_callable(func)
return Collection(filter(func, self._items))
|
Return a new Collection with the last few items removed.
|
def takewhile(self, func=None):
"""
Return a new Collection with the last few items removed.
Parameters:
func : function(Node) -> Node
Returns:
A new Collection, discarding all items
at and after the first item where bool(func(item)) == False
Examples:
node.find_all('tr').takewhile(Q.find_all('td').count() > 3)
"""
func = _make_callable(func)
return Collection(takewhile(func, self._items))
|
Return a new Collection with the first few items removed.
|
def dropwhile(self, func=None):
"""
Return a new Collection with the first few items removed.
Parameters:
func : function(Node) -> Node
Returns:
A new Collection, discarding all items
before the first item where bool(func(item)) == True
"""
func = _make_callable(func)
return Collection(dropwhile(func, self._items))
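A hedged usage sketch, analogous to the takewhile example above (node and Q assumed in scope):
node.find_all('tr').dropwhile(Q.find_all('td').count() < 3)   # skip leading rows with fewer than 3 cells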
|
Build a list of dicts by calling :meth:`Node.dump` on each item.
|
def dump(self, *args, **kwargs):
"""
Build a list of dicts, by calling :meth:`Node.dump`
on each item.
Each keyword provides a function that extracts a value
from a Node.
Examples:
>>> c = Collection([Scalar(1), Scalar(2)])
>>> c.dump(x2=Q*2, m1=Q-1).val()
[{'x2': 2, 'm1': 0}, {'x2': 4, 'm1': 1}]
"""
return self.each(Q.dump(*args, **kwargs))
|
Zip the items of this collection with one or more other sequences and wrap the result.
|
def zip(self, *others):
"""
Zip the items of this collection with one or more
other sequences, and wrap the result.
Unlike Python's zip, all sequences must be the same length.
Parameters:
others: One or more iterables or Collections
Returns:
A new collection.
Examples:
>>> c1 = Collection([Scalar(1), Scalar(2)])
>>> c2 = Collection([Scalar(3), Scalar(4)])
>>> c1.zip(c2).val()
[(1, 3), (2, 4)]
"""
args = [_unwrap(item) for item in (self,) + others]
ct = self.count()
if not all(len(arg) == ct for arg in args):
raise ValueError("Arguments are not all the same length")
return Collection(map(Wrapper.wrap, zip(*args)))
|
Turn this collection into a Scalar(dict) by zipping keys and items.
|
def dictzip(self, keys):
"""
Turn this collection into a Scalar(dict), by zipping keys and items.
Parameters:
keys: list or Collection of NavigableStrings
The keys of the dictionary
Examples:
>>> c = Collection([Scalar(1), Scalar(2)])
>>> c.dictzip(['a', 'b']).val() == {'a': 1, 'b': 2}
True
"""
return Scalar(dict(zip(_unwrap(keys), self.val())))
|
Find a single Node among this Node's descendants.
|
def find(self, *args, **kwargs):
"""
Find a single Node among this Node's descendants.
Returns :class:`NullNode` if nothing matches.
This inputs to this function follow the same semantics
as BeautifulSoup. See http://bit.ly/bs4doc for more info.
Examples:
- node.find('a') # look for `a` tags
- node.find('a', 'foo') # look for `a` tags with class=`foo`
- node.find(func) # find tag where func(tag) is True
- node.find(val=3) # look for tag like <a, val=3>
"""
op = operator.methodcaller('find', *args, **kwargs)
return self._wrap_node(op)
|
Like :meth:`find`, but searches through :attr:`next_siblings`
|
def find_next_sibling(self, *args, **kwargs):
"""
Like :meth:`find`, but searches through :attr:`next_siblings`
"""
op = operator.methodcaller('find_next_sibling', *args, **kwargs)
return self._wrap_node(op)
|
Like :meth:`find`, but searches through :attr:`parents`
|
def find_parent(self, *args, **kwargs):
"""
Like :meth:`find`, but searches through :attr:`parents`
"""
op = operator.methodcaller('find_parent', *args, **kwargs)
return self._wrap_node(op)
|
Like :meth:`find`, but searches through :attr:`previous_siblings`
|
def find_previous_sibling(self, *args, **kwargs):
"""
Like :meth:`find`, but searches through :attr:`previous_siblings`
"""
op = operator.methodcaller('find_previous_sibling', *args, **kwargs)
return self._wrap_node(op)
|
Like :meth:`find`, but selects all matches (not just the first one).
|
def find_all(self, *args, **kwargs):
"""
Like :meth:`find`, but selects all matches (not just the first one).
Returns a :class:`Collection`.
If no elements match, this returns a Collection with no items.
"""
op = operator.methodcaller('find_all', *args, **kwargs)
return self._wrap_multi(op)
|
Like :meth:`find_all`, but searches through :attr:`next_siblings`
|
def find_next_siblings(self, *args, **kwargs):
"""
Like :meth:`find_all`, but searches through :attr:`next_siblings`
"""
op = operator.methodcaller('find_next_siblings', *args, **kwargs)
return self._wrap_multi(op)
|
Like :meth:`find_all`, but searches through :attr:`parents`
|
def find_parents(self, *args, **kwargs):
"""
Like :meth:`find_all`, but searches through :attr:`parents`
"""
op = operator.methodcaller('find_parents', *args, **kwargs)
return self._wrap_multi(op)
|
Like :meth:`find_all`, but searches through :attr:`previous_siblings`
|
def find_previous_siblings(self, *args, **kwargs):
"""
Like :meth:`find_all`, but searches through :attr:`previous_siblings`
"""
op = operator.methodcaller('find_previous_siblings', *args, **kwargs)
return self._wrap_multi(op)
|
Like :meth:`find_all`, but takes a CSS selector string as input.
|
def select(self, selector):
"""
Like :meth:`find_all`, but takes a CSS selector string as input.
"""
op = operator.methodcaller('select', selector)
return self._wrap_multi(op)
|
Return potential locations of IACA installation.
|
def serach_path():
"""Return potential locations of IACA installation."""
operating_system = get_os()
# 1st choice: in ~/.kerncraft/iaca-{}
# 2nd choice: in package directory / iaca-{}
return [os.path.expanduser("~/.kerncraft/iaca/{}/".format(operating_system)),
os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/iaca/{}/'.format(
operating_system)]
|
Return a (hopefully) valid installation of IACA.
|
def find_iaca():
"""Return (hopefully) valid installation of IACA."""
requires = ['iaca2.2', 'iaca2.3', 'iaca3.0']
for path in serach_path():
path += 'bin/'
valid = True
for r in requires:
if not os.path.exists(path + r):
valid = False
break
if valid:
return path
raise RuntimeError("No IACA installation found in {}. Run iaca_get command to fix this issue."
"".format(serach_path()))
|
Yield all groups of a simple regex-like expression.
|
def group_iterator(group):
"""
Yield all groups of a simple regex-like expression.
The only special character is a dash (-), which takes the preceding and the following chars to
compute a range. If the range is non-sensical (e.g., b-a) it will be empty.
Example:
>>> list(group_iterator('a-f'))
['a', 'b', 'c', 'd', 'e', 'f']
>>> list(group_iterator('148'))
['1', '4', '8']
>>> list(group_iterator('7-9ab'))
['7', '8', '9', 'a', 'b']
>>> list(group_iterator('0B-A1'))
['0', '1']
"""
ordered_chars = string.ascii_letters + string.digits
tokenizer = ('(?P<seq>[a-zA-Z0-9]-[a-zA-Z0-9])|'
'(?P<chr>.)')
for m in re.finditer(tokenizer, group):
if m.group('seq'):
start, sep, end = m.group('seq')
for i in range(ordered_chars.index(start), ordered_chars.index(end)+1):
yield ordered_chars[i]
else:
yield m.group('chr')
|
Very reduced regular expressions for describing a group of registers.
|
def register_options(regdescr):
"""
Very reduced regular expressions for describing a group of registers.
Only groups in square bracktes and unions with pipes (|) are supported.
Examples:
>>> list(register_options('PMC[0-3]'))
['PMC0', 'PMC1', 'PMC2', 'PMC3']
>>> list(register_options('MBOX0C[01]'))
['MBOX0C0', 'MBOX0C1']
>>> list(register_options('CBOX2C1'))
['CBOX2C1']
>>> list(register_options('CBOX[0-3]C[01]'))
['CBOX0C0', 'CBOX0C1', 'CBOX1C0', 'CBOX1C1', 'CBOX2C0', 'CBOX2C1', 'CBOX3C0', 'CBOX3C1']
>>> list(register_options('PMC[0-1]|PMC[23]'))
['PMC0', 'PMC1', 'PMC2', 'PMC3']
"""
if not regdescr:
yield None
return
tokenizer = (r'\[(?P<grp>[^]]+)\]|'
'(?P<chr>.)')
for u in regdescr.split('|'):
m = re.match(tokenizer, u)
if m.group('grp'):
current = group_iterator(m.group('grp'))
else:
current = [m.group('chr')]
for c in current:
if u[m.end():]:
for r in register_options(u[m.end():]):
yield c + r
else:
yield c
|
Return a LIKWID event string from an event tuple or keyword arguments.
|
def eventstr(event_tuple=None, event=None, register=None, parameters=None):
"""
Return a LIKWID event string from an event tuple or keyword arguments.
*event_tuple* may have two or three arguments: (event, register) or
(event, register, parameters)
Keyword arguments will be overwritten by *event_tuple*.
>>> eventstr(('L1D_REPLACEMENT', 'PMC0', None))
'L1D_REPLACEMENT:PMC0'
>>> eventstr(('L1D_REPLACEMENT', 'PMC0'))
'L1D_REPLACEMENT:PMC0'
>>> eventstr(('MEM_UOPS_RETIRED_LOADS', 'PMC3', {'EDGEDETECT': None, 'THRESHOLD': 2342}))
'MEM_UOPS_RETIRED_LOADS:PMC3:EDGEDETECT:THRESHOLD=0x926'
>>> eventstr(event='DTLB_LOAD_MISSES_WALK_DURATION', register='PMC3')
'DTLB_LOAD_MISSES_WALK_DURATION:PMC3'
"""
if event_tuple and len(event_tuple) == 3:
event, register, parameters = event_tuple
elif len(event_tuple) == 2:
event, register = event_tuple
event_dscr = [event, register]
if parameters:
for k, v in sorted(event_tuple[2].items()): # sorted for reproducability
if type(v) is int:
k += "={}".format(hex(v))
event_dscr.append(k)
return ":".join(event_dscr)
|
Compile list of minimal runs for given events.
|
def build_minimal_runs(events):
"""Compile list of minimal runs for given events."""
    # Eliminate duplicate events
events = [e for i, e in enumerate(events) if events.index(e) == i]
# Build list of runs per register group
scheduled_runs = {}
scheduled_events = []
cur_run = 0
while len(scheduled_events) != len(events):
for event_tpl in events:
event, registers, parameters = event_tpl
            # Skip already scheduled events
if event_tpl in scheduled_events:
continue
# Compile explicit list of possible register locations
for possible_reg in register_options(registers):
# Schedule in current run, if register is not yet in use
s = scheduled_runs.setdefault(cur_run, {})
if possible_reg not in s:
s[possible_reg] = (event, possible_reg, parameters)
# ban from further scheduling attempts
scheduled_events.append(event_tpl)
break
cur_run += 1
    # Collapse all register dicts to single runs
runs = [list(v.values()) for v in scheduled_runs.values()]
return runs
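# Usage sketch (not part of the original module; event and register names are
# illustrative LIKWID-style values): all four events fit into a single run,
# because each one still finds a free register in its group.
example_events = [
    ('L1D_REPLACEMENT', 'PMC[0-3]', None),
    ('L2_RQSTS_MISS', 'PMC[0-3]', None),
    ('MEM_UOPS_RETIRED_LOADS', 'PMC[0-3]', None),
    ('CAS_COUNT_RD', 'MBOX0C[01]', None),
]
# build_minimal_runs(example_events) ->
# [[('L1D_REPLACEMENT', 'PMC0', None), ('L2_RQSTS_MISS', 'PMC1', None),
#   ('MEM_UOPS_RETIRED_LOADS', 'PMC2', None), ('CAS_COUNT_RD', 'MBOX0C0', None)]]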
|
Apply cache prediction to generate cache access behaviour.
|
def calculate_cache_access(self):
"""Apply cache prediction to generate cache access behaviour."""
self.results = {'misses': self.predictor.get_misses(),
'hits': self.predictor.get_hits(),
'evicts': self.predictor.get_evicts(),
'verbose infos': self.predictor.get_infos(), # only for verbose outputs
'bottleneck level': 0,
'mem bottlenecks': []}
element_size = self.kernel.datatypes_size[self.kernel.datatype]
cacheline_size = float(self.machine['cacheline size'])
elements_per_cacheline = int(cacheline_size // element_size)
total_flops = sum(self.kernel._flops.values())*elements_per_cacheline
# TODO let user choose threads_per_core:
threads_per_core = 1
# Compile relevant information
# CPU-L1 stats (in bytes!)
# We compile CPU-L1 stats on our own, because cacheprediction only works on cache lines
read_offsets, write_offsets = zip(*list(self.kernel.compile_global_offsets(
iteration=range(0, elements_per_cacheline))))
read_offsets = set([item for sublist in read_offsets if sublist is not None
for item in sublist])
write_offsets = set([item for sublist in write_offsets if sublist is not None
for item in sublist])
write_streams = len(write_offsets)
read_streams = len(read_offsets) + write_streams # write-allocate
total_loads = read_streams * element_size
# total_evicts = write_streams * element_size
bw, measurement_kernel = self.machine.get_bandwidth(
0,
read_streams - write_streams, # no write-allocate in L1
write_streams,
threads_per_core,
cores=self.cores)
# Calculate performance (arithmetic intensity * bandwidth with
# arithmetic intensity = flops / bytes loaded )
if total_loads == 0:
# This happens in case of full-caching
arith_intens = None
performance = None
else:
arith_intens = float(total_flops)/total_loads
performance = PrefixedUnit(arith_intens * float(bw), 'FLOP/s')
self.results['mem bottlenecks'].append({
'performance': self.conv_perf(PrefixedUnit(performance, 'FLOP/s')),
'level': self.machine['memory hierarchy'][0]['level'],
'arithmetic intensity': arith_intens,
'bw kernel': measurement_kernel,
'bandwidth': bw,
'bytes transfered': total_loads})
self.results['bottleneck level'] = len(self.results['mem bottlenecks'])-1
self.results['min performance'] = self.conv_perf(performance)
# for other cache and memory levels:
for cache_level, cache_info in list(enumerate(self.machine['memory hierarchy']))[:-1]:
# Compiling stats (in bytes!)
total_misses = self.results['misses'][cache_level]*cacheline_size
total_evicts = self.results['evicts'][cache_level]*cacheline_size
# choose bw according to cache level and problem
# first, compile stream counts at current cache level
            # write-allocate is already resolved above
read_streams = self.results['misses'][cache_level]
write_streams = self.results['evicts'][cache_level]
            # second, try to find best fitting kernel (closest to seen stream counts):
bw, measurement_kernel = self.machine.get_bandwidth(
cache_level+1, read_streams, write_streams, threads_per_core,
cores=self.cores)
# Calculate performance (arithmetic intensity * bandwidth with
            #                        arithmetic intensity = flops / bytes transferred)
bytes_transfered = total_misses + total_evicts
if bytes_transfered == 0:
# This happens in case of full-caching
arith_intens = float('inf')
performance = PrefixedUnit(float('inf'), 'FLOP/s')
else:
arith_intens = float(total_flops)/bytes_transfered
performance = PrefixedUnit(arith_intens * float(bw), 'FLOP/s')
self.results['mem bottlenecks'].append({
'performance': self.conv_perf(performance),
'level': (self.machine['memory hierarchy'][cache_level + 1]['level']),
'arithmetic intensity': arith_intens,
'bw kernel': measurement_kernel,
'bandwidth': bw,
'bytes transfered': bytes_transfered})
if performance < self.results.get('min performance', {'FLOP/s': performance})['FLOP/s']:
self.results['bottleneck level'] = len(self.results['mem bottlenecks'])-1
self.results['min performance'] = self.conv_perf(performance)
return self.results
|
Run analysis.
|
def analyze(self):
"""Run analysis."""
precision = 'DP' if self.kernel.datatype == 'double' else 'SP'
self.calculate_cache_access()
self.results['max_perf'] = self.conv_perf(self.machine['clock'] * self.cores * \
self.machine['FLOPs per cycle'][precision]['total'])
|
Convert performance (FLOP/s) to other units, such as It/s or cy/CL.
|
def conv_perf(self, performance):
"""Convert performance (FLOP/s) to other units, such as It/s or cy/CL."""
clock = self.machine['clock']
flops_per_it = sum(self.kernel._flops.values())
it_s = performance/flops_per_it
it_s.unit = 'It/s'
element_size = self.kernel.datatypes_size[self.kernel.datatype]
elements_per_cacheline = int(float(self.machine['cacheline size'])) / element_size
cy_cl = clock/it_s*elements_per_cacheline
cy_cl.unit = 'cy/CL'
cy_it = clock/it_s
cy_it.unit = 'cy/It'
return {'It/s': it_s,
'cy/CL': cy_cl,
'cy/It': cy_it,
'FLOP/s': performance}
|
Report analysis outcome in human readable form.
|
def report(self, output_file=sys.stdout):
"""Report analysis outcome in human readable form."""
max_perf = self.results['max_perf']
if self._args and self._args.verbose >= 3:
print('{}'.format(pformat(self.results)), file=output_file)
if self._args and self._args.verbose >= 1:
print('{}'.format(pformat(self.results['verbose infos'])), file=output_file)
print('Bottlenecks:', file=output_file)
print(' level | a. intensity | performance | peak bandwidth | peak bandwidth kernel',
file=output_file)
print('--------+--------------+-----------------+-------------------+----------------------',
file=output_file)
print(' CPU | | {!s:>15} | |'.format(
max_perf[self._args.unit]),
file=output_file)
for b in self.results['mem bottlenecks']:
print('{level:>7} | {arithmetic intensity:>5.2} FLOP/B | {0!s:>15} |'
' {bandwidth!s:>17} | {bw kernel:<8}'.format(
b['performance'][self._args.unit], **b),
file=output_file)
print('', file=output_file)
if self.results['min performance']['FLOP/s'] > max_perf['FLOP/s']:
# CPU bound
print('CPU bound. {!s} due to CPU max. FLOP/s'.format(max_perf), file=output_file)
else:
# Cache or mem bound
print('Cache or mem bound.', file=output_file)
bottleneck = self.results['mem bottlenecks'][self.results['bottleneck level']]
print('{!s} due to {} transfer bottleneck (with bw from {} benchmark)'.format(
bottleneck['performance'][self._args.unit],
bottleneck['level'],
bottleneck['bw kernel']),
file=output_file)
print('Arithmetic Intensity: {:.2f} FLOP/B'.format(bottleneck['arithmetic intensity']),
file=output_file)
|
Run complete analysis.
|
def analyze(self):
"""Run complete analysis."""
self.results = self.calculate_cache_access()
try:
iaca_analysis, asm_block = self.kernel.iaca_analysis(
micro_architecture=self.machine['micro-architecture'],
asm_block=self.asm_block,
pointer_increment=self.pointer_increment,
verbose=self.verbose > 2)
except RuntimeError as e:
print("IACA analysis failed: " + str(e))
sys.exit(1)
block_throughput = iaca_analysis['throughput']
uops = iaca_analysis['uops']
iaca_output = iaca_analysis['output']
port_cycles = iaca_analysis['port cycles']
# Normalize to cycles per cacheline
elements_per_block = abs(asm_block['pointer_increment']
/ self.kernel.datatypes_size[self.kernel.datatype])
block_size = elements_per_block*self.kernel.datatypes_size[self.kernel.datatype]
try:
block_to_cl_ratio = float(self.machine['cacheline size'])/block_size
except ZeroDivisionError as e:
print("Too small block_size / pointer_increment:", e, file=sys.stderr)
sys.exit(1)
port_cycles = dict([(i[0], i[1]*block_to_cl_ratio) for i in list(port_cycles.items())])
uops = uops*block_to_cl_ratio
cl_throughput = block_throughput*block_to_cl_ratio
flops_per_element = sum(self.kernel._flops.values())
# Overwrite CPU-L1 stats, because they are covered by IACA
self.results['mem bottlenecks'][0] = None
# Reevaluate mem bottleneck
self.results['min performance'] = self.conv_perf(PrefixedUnit(float('inf'), 'FLOP/s'))
self.results['bottleneck level'] = None
for level, bottleneck in enumerate(self.results['mem bottlenecks']):
if level == 0:
# ignoring CPU-L1
continue
if bottleneck['performance']['FLOP/s'] < self.results['min performance']['FLOP/s']:
self.results['bottleneck level'] = level
self.results['min performance'] = bottleneck['performance']
# Create result dictionary
self.results.update({
'cpu bottleneck': {
'port cycles': port_cycles,
'cl throughput': cl_throughput,
'uops': uops,
'performance throughput': self.conv_perf(PrefixedUnit(
self.machine['clock']/block_throughput*elements_per_block*flops_per_element
* self.cores, "FLOP/s")),
'IACA output': iaca_output}})
|
Print human readable report of model.
|
def report(self, output_file=sys.stdout):
"""Print human readable report of model."""
cpu_perf = self.results['cpu bottleneck']['performance throughput']
if self.verbose >= 3:
print('{}'.format(pformat(self.results)), file=output_file)
if self.verbose >= 1:
print('Bottlenecks:', file=output_file)
print(' level | a. intensity | performance | peak bandwidth | peak bandwidth kernel',
file=output_file)
print('--------+--------------+-----------------+-------------------+----------------------',
file=output_file)
print(' CPU | | {!s:>15} | |'.format(
cpu_perf[self._args.unit]),
file=output_file)
for b in self.results['mem bottlenecks']:
# Skip CPU-L1 from Roofline model
if b is None:
continue
print('{level:>7} | {arithmetic intensity:>5.2} FLOP/B | {0!s:>15} |'
' {bandwidth!s:>17} | {bw kernel:<8}'.format(
b['performance'][self._args.unit], **b),
file=output_file)
print('', file=output_file)
        print('IACA analysis:', file=output_file)
        print('{!s}'.format(
            {k: v
             for k, v in list(self.results['cpu bottleneck'].items())
             if k not in ['IACA output']}),
file=output_file)
if self.results['min performance']['FLOP/s'] > cpu_perf['FLOP/s']:
# CPU bound
print('CPU bound. {!s} due to CPU bottleneck'.format(cpu_perf[self._args.unit]),
file=output_file)
else:
# Cache or mem bound
print('Cache or mem bound.', file=output_file)
bottleneck = self.results['mem bottlenecks'][self.results['bottleneck level']]
print('{!s} due to {} transfer bottleneck (with bw from {} benchmark)'.format(
bottleneck['performance'][self._args.unit],
bottleneck['level'],
bottleneck['bw kernel']),
file=output_file)
print('Arithmetic Intensity: {:.2f} FLOP/B'.format(bottleneck['arithmetic intensity']),
file=output_file)
|
Apply layer condition model to calculate cache accesses.
|
def calculate_cache_access(self):
"""Apply layer condition model to calculate cache accesses."""
# FIXME handle multiple datatypes
element_size = self.kernel.datatypes_size[self.kernel.datatype]
results = {'dimensions': {}}
def sympy_compare(a, b):
c = 0
for i in range(min(len(a), len(b))):
s = a[i] - b[i]
if sympy.simplify(s > 0):
c = -1
elif sympy.simplify(s == 0):
c = 0
else:
c = 1
if c != 0:
break
return c
accesses = defaultdict(list)
sympy_accesses = defaultdict(list)
for var_name in self.kernel.variables:
for r in self.kernel.sources.get(var_name, []):
if r is None:
continue
accesses[var_name].append(r)
sympy_accesses[var_name].append(self.kernel.access_to_sympy(var_name, r))
for w in self.kernel.destinations.get(var_name, []):
if w is None:
continue
accesses[var_name].append(w)
sympy_accesses[var_name].append(self.kernel.access_to_sympy(var_name, w))
# order accesses by increasing order
accesses[var_name].sort(key=cmp_to_key(sympy_compare), reverse=True)
results['accesses'] = accesses
results['sympy_accesses'] = sympy_accesses
# For each dimension (1D, 2D, 3D ... nD)
for dimension in range(1, len(list(self.kernel.get_loop_stack()))+1):
results['dimensions'][dimension] = {}
slices = defaultdict(list)
slices_accesses = defaultdict(list)
for var_name in accesses:
for a in accesses[var_name]:
# slices are identified by the tuple of indices of higher dimensions
slice_id = tuple([var_name, tuple(a[:-dimension])])
slices[slice_id].append(a)
slices_accesses[slice_id].append(self.kernel.access_to_sympy(var_name, a))
results['dimensions'][dimension]['slices'] = slices
results['dimensions'][dimension]['slices_accesses'] = slices_accesses
slices_distances = defaultdict(list)
for k, v in slices_accesses.items():
for i in range(1, len(v)):
slices_distances[k].append((v[i] - v[i-1]).simplify())
results['dimensions'][dimension]['slices_distances'] = slices_distances
# Check that distances contain only free_symbols based on constants
for dist in chain(*slices_distances.values()):
if any([s not in self.kernel.constants.keys() for s in dist.free_symbols]):
raise ValueError("Some distances are not based on constants: "+str(dist))
# Sum of lengths between relative distances
slices_sum = sum([sum(dists) for dists in slices_distances.values()])
results['dimensions'][dimension]['slices_sum'] = slices_sum
# Max of lengths between relative distances
# Work-around, the arguments with the most symbols get to stay
# FIXME, may not be correct in all cases. e.g., N+M vs. N*M
def FuckedUpMax(*args):
if len(args) == 1:
return args[0]
# expand all expressions:
args = [a.expand() for a in args]
# Filter expressions with less than the maximum number of symbols
max_symbols = max([len(a.free_symbols) for a in args])
args = list(filter(lambda a: len(a.free_symbols) == max_symbols, args))
if max_symbols == 0:
return sympy.Max(*args)
# Filter symbols with lower exponent
max_coeffs = 0
for a in args:
for s in a.free_symbols:
max_coeffs = max(max_coeffs, len(sympy.Poly(a, s).all_coeffs()))
def coeff_filter(a):
return max(
0, 0,
*[len(sympy.Poly(a, s).all_coeffs()) for s in a.free_symbols]) == max_coeffs
args = list(filter(coeff_filter, args))
m = sympy.Max(*args)
# if m.is_Function:
# raise ValueError("Could not resolve {} to maximum.".format(m))
return m
slices_max = FuckedUpMax(sympy.Integer(0),
*[FuckedUpMax(*dists) for dists in slices_distances.values()])
results['dimensions'][dimension]['slices_max'] = slices_max
            # Number of slices
slices_count = len(slices_accesses)
results['dimensions'][dimension]['slices_count'] = slices_count
# Cache requirement expression
cache_requirement_bytes = (slices_sum + slices_max*slices_count)*element_size
results['dimensions'][dimension]['cache_requirement_bytes'] = cache_requirement_bytes
# Apply to all cache sizes
csim = self.machine.get_cachesim(self._args.cores)
results['dimensions'][dimension]['caches'] = {}
for cl in csim.levels(with_mem=False):
cache_equation = sympy.Eq(cache_requirement_bytes, cl.size())
if len(self.kernel.constants.keys()) <= 1:
inequality = sympy.solve(sympy.LessThan(cache_requirement_bytes, cl.size()),
*self.kernel.constants.keys())
else:
# Sympy does not solve for multiple constants
inequality = sympy.LessThan(cache_requirement_bytes, cl.size())
try:
eq = sympy.solve(inequality, *self.kernel.constants.keys(), dict=True)
except NotImplementedError:
eq = None
results['dimensions'][dimension]['caches'][cl.name] = {
'cache_size': cl.size(),
'equation': cache_equation,
'lt': inequality,
'eq': eq
}
return results
|
Run complete analysis.
|
def analyze(self):
"""Run complete analysis."""
# check that layer conditions can be applied on this kernel:
# 1. All iterations may only have a step width of 1
loop_stack = list(self.kernel.get_loop_stack())
if any([l['increment'] != 1 for l in loop_stack]):
raise ValueError("Can not apply layer condition, since not all loops are of step "
"length 1.")
# 2. The order of iterations must be reflected in the order of indices in all array
# references containing the inner loop index. If the inner loop index is not part of the
# reference, the reference is simply ignored
        # TODO support flattened array indexes
for aref in list(self.kernel.index_order()):
            # Strip leftmost empty sets (references without index)
while aref and len(aref[0]) == 0:
aref.pop(0)
for i, idx_names in enumerate(aref):
                # 1. Check that there are enough loops to handle access dimensions
# 2. Check that offset index matches loop index (in same order)
if i >= len(loop_stack) or \
any([loop_stack[i]['index'] != idx.name for idx in idx_names]):
raise ValueError("Can not apply layer condition, order of indices in array "
"does not follow order of loop indices. Single-dimension is "
"currently not supported.")
# 3. Indices may only increase with one
# TODO use a public interface, not self.kernel._*
for arefs in chain(chain(*self.kernel.sources.values()),
chain(*self.kernel.destinations.values())):
if not arefs:
continue
            # Strip leftmost constant offsets (references without index) to support things like:
# a[0][i+1][j][k-1] with an i, j and k loop-nest
while arefs and not arefs[0].free_symbols:
arefs = arefs[1:]
            # Check that remaining indices are in order
for i, expr in enumerate(arefs):
diff = sympy.diff(expr, sympy.Symbol(loop_stack[i]['index']))
if diff != 0 and diff != 1:
                    # TODO support -1 as well
                    raise ValueError("Can not apply layer condition, array references may not "
                                     "increment by more than one per iteration.")
self.results = self.calculate_cache_access()
|
Report generated model in human readable form.
|
def report(self, output_file=sys.stdout):
"""Report generated model in human readable form."""
if self._args and self._args.verbose > 2:
pprint(self.results)
for dimension, lc_info in self.results['dimensions'].items():
print("{}D layer condition:".format(dimension), file=output_file)
for cache, lc_solution in sorted(lc_info['caches'].items()):
print(cache+": ", end='', file=output_file)
if lc_solution['lt'] is sympy.true:
print("unconditionally fulfilled", file=output_file)
else:
if lc_solution['eq'] is None:
print("{}".format(lc_solution['lt']), file=output_file)
elif type(lc_solution['eq']) is not list:
print("{}".format(lc_solution['eq']), file=output_file)
else:
for solu in lc_solution['eq']:
for s, v in solu.items():
print("{} <= {}".format(s, v), file=output_file)
|
Naive comment and macro stripping from source code.
|
def clean_code(code, comments=True, macros=False, pragmas=False):
"""
    Naive comment and macro stripping from source code.
:param comments: If True, all comments are stripped from code
:param macros: If True, all macros are stripped from code
:param pragmas: If True, all pragmas are stripped from code
:return: cleaned code. Line numbers are preserved with blank lines,
and multiline comments and macros are supported. BUT comment-like
strings are (wrongfully) treated as comments.
"""
if macros or pragmas:
lines = code.split('\n')
in_macro = False
in_pragma = False
for i in range(len(lines)):
l = lines[i].strip()
if macros and (l.startswith('#') and not l.startswith('#pragma') or in_macro):
lines[i] = ''
in_macro = l.endswith('\\')
if pragmas and (l.startswith('#pragma') or in_pragma):
lines[i] = ''
in_pragma = l.endswith('\\')
code = '\n'.join(lines)
if comments:
idx = 0
comment_start = None
while idx < len(code) - 1:
if comment_start is None and code[idx:idx + 2] == '//':
end_idx = code.find('\n', idx)
code = code[:idx] + code[end_idx:]
idx -= end_idx - idx
elif comment_start is None and code[idx:idx + 2] == '/*':
comment_start = idx
elif comment_start is not None and code[idx:idx + 2] == '*/':
code = (code[:comment_start] +
'\n' * code[comment_start:idx].count('\n') +
code[idx + 2:])
idx -= idx - comment_start
comment_start = None
idx += 1
return code
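# Minimal sketch of clean_code on a small C snippet (hypothetical input): the block
# comment is replaced by blank lines, so following lines keep their line numbers,
# and the trailing line comment is dropped.
_example_src = ('/* 1D stencil\n   kernel */\n'
                'for (int i = 1; i < N - 1; i++)  // inner loop\n'
                '    b[i] = a[i - 1] + a[i + 1];\n')
print(clean_code(_example_src))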
|
Replace all matching ID nodes in ast (in-place) with replacement.
|
def replace_id(ast, id_name, replacement):
"""
Replace all matching ID nodes in ast (in-place), with replacement.
:param id_name: name of ID node to match
:param replacement: single or list of node to insert in replacement for ID node.
"""
for a in ast:
if isinstance(a, c_ast.ID) and a.name == id_name:
# Check all attributes of ast
for attr_name in dir(ast):
# Exclude special and method attributes
if attr_name.startswith('__') or callable(getattr(ast, attr_name)):
continue
attr = getattr(ast, attr_name)
# In case of direct match, just replace
if attr is a:
setattr(ast, attr_name, replacement)
# If contained in list replace occurrence with replacement
if type(attr) is list:
for i, attr_element in enumerate(attr):
if attr_element is a:
if type(replacement) is list:
# If replacement is list, inject
attr[i:i+1] = replacement
else:
# otherwise replace
attr[i] = replacement
else:
replace_id(a, id_name, replacement)
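# Hedged sketch (assumes pycparser, whose iterable AST nodes this helper relies on):
# replace the symbolic constant N in a small condition by a literal.
from pycparser import c_ast
_cond = c_ast.BinaryOp('<', c_ast.ID('i'), c_ast.ID('N'))
replace_id(_cond, 'N', c_ast.Constant('int', '1024'))
# _cond.right is now the Constant node, i.e. the condition reads i < 1024.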
|
Round float to next multiple of base.
|
def round_to_next(x, base):
"""Round float to next multiple of base."""
# Based on: http://stackoverflow.com/a/2272174
return int(base * math.ceil(float(x)/base))
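# Quick self-checks (simple arithmetic, verifiable by hand):
assert round_to_next(13, 8) == 16
assert round_to_next(16, 8) == 16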
|
Split list of integers into blocks of block_size and return block indices.
|
def blocking(indices, block_size, initial_boundary=0):
"""
Split list of integers into blocks of block_size and return block indices.
First block element will be located at initial_boundary (default 0).
    >>> blocking([0, -1, -2, -3, -4, -5, -6, -7, -8, -9], 8)
    [-2.0, -1.0, 0.0]
    >>> blocking([0], 8)
    [0.0]
    >>> blocking([0], 8, initial_boundary=32)
    [-4.0]
"""
blocks = []
for idx in indices:
bl_idx = (idx-initial_boundary)//float(block_size)
if bl_idx not in blocks:
blocks.append(bl_idx)
blocks.sort()
return blocks
|
Dispatch to cache predictor to get cache stats.
|
def calculate_cache_access(self):
"""Dispatch to cache predictor to get cache stats."""
self.results.update({
        'cycles': [],  # will be filled by calculate_cycles()
'misses': self.predictor.get_misses(),
'hits': self.predictor.get_hits(),
'evicts': self.predictor.get_evicts(),
'verbose infos': self.predictor.get_infos()})
|
Calculate performance model cycles from cache stats.
|
def calculate_cycles(self):
"""
Calculate performance model cycles from cache stats.
    calculate_cache_access() needs to have been executed before.
"""
element_size = self.kernel.datatypes_size[self.kernel.datatype]
elements_per_cacheline = float(self.machine['cacheline size']) // element_size
iterations_per_cacheline = (sympy.Integer(self.machine['cacheline size']) /
sympy.Integer(self.kernel.bytes_per_iteration))
self.results['iterations per cacheline'] = iterations_per_cacheline
cacheline_size = float(self.machine['cacheline size'])
loads, stores = (self.predictor.get_loads(), self.predictor.get_stores())
for cache_level, cache_info in list(enumerate(self.machine['memory hierarchy']))[1:]:
throughput, duplexness = cache_info['non-overlap upstream throughput']
if type(throughput) is str and throughput == 'full socket memory bandwidth':
# Memory transfer
            # we use bandwidth to calculate cycles and then add penalty cycles (if given)
# choose bw according to cache level and problem
# first, compile stream counts at current cache level
            # write-allocate is already resolved in cache predictor
read_streams = loads[cache_level]
write_streams = stores[cache_level]
            # second, try to find best fitting kernel (closest to seen stream counts):
threads_per_core = 1
bw, measurement_kernel = self.machine.get_bandwidth(
cache_level, read_streams, write_streams, threads_per_core)
# calculate cycles
if duplexness == 'half-duplex':
cycles = float(loads[cache_level] + stores[cache_level]) * \
float(elements_per_cacheline) * float(element_size) * \
float(self.machine['clock']) / float(bw)
else: # full-duplex
raise NotImplementedError(
"full-duplex mode is not (yet) supported for memory transfers.")
# add penalty cycles for each read stream
if 'penalty cycles per read stream' in cache_info:
cycles += stores[cache_level] * \
cache_info['penalty cycles per read stream']
self.results.update({
'memory bandwidth kernel': measurement_kernel,
'memory bandwidth': bw})
else:
# since throughput is given in B/cy, and we need CL/cy:
throughput = float(throughput) / cacheline_size
# only cache cycles count
if duplexness == 'half-duplex':
cycles = (loads[cache_level] + stores[cache_level]) / float(throughput)
elif duplexness == 'full-duplex':
cycles = max(loads[cache_level] / float(throughput),
stores[cache_level] / float(throughput))
else:
raise ValueError("Duplexness of cache throughput may only be 'half-duplex'"
"or 'full-duplex', found {} in {}.".format(
duplexness, cache_info['name']))
self.results['cycles'].append((cache_info['level'], cycles))
self.results[cache_info['level']] = cycles
return self.results
|
Run complete analysis and return results.
|
def analyze(self):
"""Run complete anaylysis and return results."""
self.calculate_cache_access()
self.calculate_cycles()
self.results['flops per iteration'] = sum(self.kernel._flops.values())
return self.results
|
Print generated model data in human readable format.
|
def report(self, output_file=sys.stdout):
"""Print generated model data in human readable format."""
if self.verbose > 1:
print('{}'.format(pprint.pformat(self.results['verbose infos'])), file=output_file)
for level, cycles in self.results['cycles']:
print('{} = {}'.format(
level, self.conv_cy(cycles)[self._args.unit]), file=output_file)
if self.verbose > 1:
if 'memory bandwidth kernel' in self.results:
print('memory cycles based on {} kernel with {}'.format(
self.results['memory bandwidth kernel'],
self.results['memory bandwidth']),
file=output_file)
if self.verbose > 1:
print(file=output_file)
print(self.report_data_transfers(), file=output_file)
|
Run complete analysis and return results.
|
def analyze(self):
"""
Run complete analysis and return results.
"""
try:
incore_analysis, asm_block = self.kernel.iaca_analysis(
micro_architecture=self.machine['micro-architecture'],
asm_block=self.asm_block,
pointer_increment=self.pointer_increment,
verbose=self.verbose > 2)
except RuntimeError as e:
print("IACA analysis failed: " + str(e))
sys.exit(1)
block_throughput = incore_analysis['throughput']
port_cycles = incore_analysis['port cycles']
uops = incore_analysis['uops']
# Normalize to cycles per cacheline
elements_per_block = abs(asm_block['pointer_increment']
// self.kernel.datatypes_size[self.kernel.datatype])
block_size = elements_per_block*self.kernel.datatypes_size[self.kernel.datatype]
try:
block_to_cl_ratio = float(self.machine['cacheline size'])/block_size
except ZeroDivisionError as e:
print("Too small block_size / pointer_increment:", e, file=sys.stderr)
sys.exit(1)
port_cycles = dict([(i[0], i[1]*block_to_cl_ratio) for i in list(port_cycles.items())])
uops = uops*block_to_cl_ratio
cl_throughput = block_throughput*block_to_cl_ratio
# Compile most relevant information
T_OL = max([v for k, v in list(port_cycles.items())
if k in self.machine['overlapping model']['ports']])
T_nOL = max([v for k, v in list(port_cycles.items())
if k in self.machine['non-overlapping model']['ports']])
        # Use IACA throughput prediction if it is slower than T_nOL
if T_nOL < cl_throughput:
T_OL = cl_throughput
# Create result dictionary
self.results = {
'port cycles': port_cycles,
'cl throughput': self.conv_cy(cl_throughput),
'uops': uops,
'T_nOL': T_nOL,
'T_OL': T_OL,
'IACA output': incore_analysis['output'],
'elements_per_block': elements_per_block,
'pointer_increment': asm_block['pointer_increment'],
'flops per iteration': sum(self.kernel._flops.values())}
return self.results
|
Convert cycles (cy/CL) to other units, such as FLOP/s or It/s.
|
def conv_cy(self, cy_cl):
"""Convert cycles (cy/CL) to other units, such as FLOP/s or It/s."""
if not isinstance(cy_cl, PrefixedUnit):
cy_cl = PrefixedUnit(cy_cl, '', 'cy/CL')
clock = self.machine['clock']
element_size = self.kernel.datatypes_size[self.kernel.datatype]
elements_per_cacheline = int(self.machine['cacheline size']) // element_size
it_s = clock/cy_cl*elements_per_cacheline
it_s.unit = 'It/s'
flops_per_it = sum(self.kernel._flops.values())
performance = it_s*flops_per_it
performance.unit = 'FLOP/s'
cy_it = cy_cl*elements_per_cacheline
cy_it.unit = 'cy/It'
return {'It/s': it_s,
'cy/CL': cy_cl,
'cy/It': cy_it,
'FLOP/s': performance}
|
Print generated model data in human readable format.
|
def report(self, output_file=sys.stdout):
"""Print generated model data in human readable format."""
if self.verbose > 2:
print("IACA Output:", file=output_file)
print(self.results['IACA output'], file=output_file)
print('', file=output_file)
if self.verbose > 1:
print('Detected pointer increment: {}'.format(self.results['pointer_increment']),
file=output_file)
print('Derived elements stored to per asm block iteration: {}'.format(
self.results['elements_per_block']), file=output_file)
print('Ports and cycles:', str(self.results['port cycles']), file=output_file)
print('Uops:', str(self.results['uops']), file=output_file)
print('Throughput: {}'.format(self.results['cl throughput'][self._args.unit]),
file=output_file)
print('T_nOL = {:.1f} cy/CL'.format(self.results['T_nOL']), file=output_file)
print('T_OL = {:.1f} cy/CL'.format(self.results['T_OL']), file=output_file)
|
Run complete analysis.
|
def analyze(self):
"""Run complete analysis."""
self._CPU.analyze()
self._data.analyze()
self.results = copy.deepcopy(self._CPU.results)
self.results.update(copy.deepcopy(self._data.results))
cores_per_numa_domain = self.machine['cores per NUMA domain']
# Compile total single-core prediction
self.results['total cycles'] = self._CPU.conv_cy(max(
self.results['T_OL'],
sum([self.results['T_nOL']] + [i[1] for i in self.results['cycles']])))
T_ECM = float(self.results['total cycles']['cy/CL'])
# T_MEM is the cycles accounted to memory transfers
T_MEM = self.results['cycles'][-1][1]
# Simple scaling prediction:
# Assumptions are:
# - bottleneck is always LLC-MEM
# - all caches scale with number of cores (bw AND size(WRONG!))
# Full caching in higher cache level
self.results['scaling cores'] = float('inf')
# Not full caching:
if self.results['cycles'][-1][1] != 0.0:
# Considering memory bus utilization
utilization = [0]
self.results['scaling cores'] = float('inf')
for c in range(1, cores_per_numa_domain + 1):
if c * T_MEM > (T_ECM + utilization[c - 1] * (c - 1) * T_MEM / 2):
utilization.append(1.0)
self.results['scaling cores'] = min(self.results['scaling cores'], c)
else:
utilization.append(c * T_MEM / (T_ECM + utilization[c - 1] * (c - 1) * T_MEM / 2))
utilization = utilization[1:]
# Old scaling code
#self.results['scaling cores'] = (
# max(self.results['T_OL'],
# self.results['T_nOL'] + sum([c[1] for c in self.results['cycles']])) /
# self.results['cycles'][-1][1])
scaling_predictions = []
for cores in range(1, self.machine['cores per socket'] + 1):
scaling = {'cores': cores, 'notes': [], 'performance': None,
'in-NUMA performance': None}
# Detailed scaling:
if cores <= self.results['scaling cores']:
# Is it purely in-cache?
innuma_rectp = PrefixedUnit(
max(sum([c[1] for c in self.results['cycles']]) + self.results['T_nOL'],
self.results['T_OL']) / (T_ECM/T_MEM),
"cy/CL")
scaling['notes'].append("memory-interface not saturated")
else:
innuma_rectp = PrefixedUnit(self.results['cycles'][-1][1], 'cy/CL')
scaling['notes'].append("memory-interface saturated on first NUMA domain")
# Include NUMA-local performance in results dict
scaling['in-NUMA performance'] = innuma_rectp
if 0 < cores <= cores_per_numa_domain:
# only in-numa scaling to consider
scaling['performance'] = self._CPU.conv_cy(
innuma_rectp / utilization[cores - 1])
scaling['notes'].append("in-NUMA-domain scaling")
elif cores <= self.machine['cores per socket'] * self.machine['sockets']:
# out-of-numa scaling behavior
scaling['performance'] = self._CPU.conv_cy(
innuma_rectp * cores_per_numa_domain / cores)
scaling['notes'].append("out-of-NUMA-domain scaling")
else:
raise ValueError("Number of cores must be greater than zero and upto the max. "
"number of cores defined by cores per socket and sockets in"
"machine file.")
scaling_predictions.append(scaling)
else:
# pure in-cache performace (perfect scaling)
scaling_predictions = [
{'cores': cores, 'notes': ['pure in-cache'],
'performance': self._CPU.conv_cy(T_ECM/cores),
'in-NUMA performance': self._CPU.conv_cy(T_ECM/cores_per_numa_domain)}
for cores in range(1, self.machine['cores per socket'] + 1)]
# Also include prediction for all in-NUMA core counts in results
self.results['scaling prediction'] = scaling_predictions
if self._args.cores:
self.results['multi-core'] = scaling_predictions[self._args.cores - 1]
else:
self.results['multi-core'] = None
|
Print generated model data in human readable format.
|
def report(self, output_file=sys.stdout):
"""Print generated model data in human readable format."""
report = ''
if self.verbose > 1:
self._CPU.report()
self._data.report()
report += '{{ {:.1f} || {:.1f} | {} }} cy/CL'.format(
self.results['T_OL'],
self.results['T_nOL'],
' | '.join(['{:.1f}'.format(i[1]) for i in self.results['cycles']]))
if self._args.cores > 1:
report += " (single core)"
report += ' = {}'.format(self.results['total cycles'][self._args.unit])
report += '\n{{ {:.1f} \ {} }} cy/CL'.format(
max(self.results['T_OL'], self.results['T_nOL']),
' \ '.join(['{:.1f}'.format(max(sum([x[1] for x in self.results['cycles'][:i+1]]) +
self.results['T_nOL'], self.results['T_OL']))
for i in range(len(self.results['cycles']))]))
if self._args.cores > 1:
report += " (single core)"
report += '\nsaturating at {:.0f} cores'.format(self.results['scaling cores'])
if self.results['multi-core']:
report += "\nprediction for {} cores,".format(self.results['multi-core']['cores']) + \
" assuming static scheduling: "
report += "{} ({})\n".format(
self.results['multi-core']['performance'][self._args.unit],
', '.join(self.results['multi-core']['notes']))
if self.results['scaling prediction']:
report += "\nScaling prediction, considering memory bus utilization penalty and " \
"assuming all scalable caches:\n"
if self.machine['cores per socket'] > self.machine['cores per NUMA domain']:
report += "1st NUMA dom." + (len(self._args.unit) - 4) * ' ' + '||' + \
'--------' * (self.machine['cores per NUMA domain']-1) + '-------|\n'
report += "cores " + (len(self._args.unit)+2)*' ' + " || " + ' | '.join(
['{:<5}'.format(s['cores']) for s in self.results['scaling prediction']]) + '\n'
report += "perf. ({}) || ".format(self._args.unit) + ' | '.join(
['{:<5.1f}'.format(float(s['performance'][self._args.unit]))
for s in self.results['scaling prediction']]) + '\n'
print(report, file=output_file)
if self._args and self._args.ecm_plot:
assert plot_support, "matplotlib couldn't be imported. Plotting is not supported."
fig = plt.figure(frameon=False)
self.plot(fig)
|
Plot visualization of model prediction.
|
def plot(self, fig=None):
"""Plot visualization of model prediction."""
if not fig:
fig = plt.gcf()
fig.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.15)
ax = fig.add_subplot(1, 1, 1)
sorted_overlapping_ports = sorted(
[(p, self.results['port cycles'][p]) for p in self.machine['overlapping ports']],
key=lambda x: x[1])
yticks_labels = []
yticks = []
xticks_labels = []
xticks = []
# Plot configuration
height = 0.9
i = 0
# T_OL
colors = ([(254. / 255, 177. / 255., 178. / 255.)] +
[(255. / 255., 255. / 255., 255. / 255.)] * (len(sorted_overlapping_ports) - 1))
for p, c in sorted_overlapping_ports:
ax.barh(i, c, height, align='center', color=colors.pop(),
edgecolor=(0.5, 0.5, 0.5), linestyle='dashed')
if i == len(sorted_overlapping_ports) - 1:
ax.text(c / 2.0, i, '$T_\mathrm{OL}$', ha='center', va='center')
yticks_labels.append(p)
yticks.append(i)
i += 1
xticks.append(sorted_overlapping_ports[-1][1])
xticks_labels.append('{:.1f}'.format(sorted_overlapping_ports[-1][1]))
# T_nOL + memory transfers
y = 0
colors = [(187. / 255., 255 / 255., 188. / 255.)] * (len(self.results['cycles'])) + \
[(119. / 255, 194. / 255., 255. / 255.)]
for k, v in [('nOL', self.results['T_nOL'])] + self.results['cycles']:
ax.barh(i, v, height, y, align='center', color=colors.pop())
ax.text(y + v / 2.0, i, '$T_\mathrm{' + k + '}$', ha='center', va='center')
xticks.append(y + v)
xticks_labels.append('{:.1f}'.format(y + v))
y += v
yticks_labels.append('LD')
yticks.append(i)
ax.tick_params(axis='y', which='both', left='off', right='off')
ax.tick_params(axis='x', which='both', top='off')
ax.set_xlabel('t [cy]')
ax.set_ylabel('execution port')
ax.set_yticks(yticks)
ax.set_yticklabels(yticks_labels)
ax.set_xticks(xticks)
ax.set_xticklabels(xticks_labels, rotation='vertical')
ax.xaxis.grid(alpha=0.7, linestyle='--')
fig.savefig(self._args.ecm_plot)
|
Strip whitespaces and comments from asm lines.
|
def strip_and_uncomment(asm_lines):
"""Strip whitespaces and comments from asm lines."""
asm_stripped = []
for line in asm_lines:
# Strip comments and whitespaces
asm_stripped.append(line.split('#')[0].strip())
return asm_stripped
|
Strip all labels which are never referenced.
|
def strip_unreferenced_labels(asm_lines):
"""Strip all labels, which are never referenced."""
asm_stripped = []
for line in asm_lines:
if re.match(r'^\S+:', line):
# Found label
label = line[0:line.find(':')]
# Search for references to current label
            if not any([re.match(r'^[^#]*\s' + re.escape(label) + r'[\s,]?.*$', l) for l in asm_lines]):
# Skip labels without seen reference
line = ''
asm_stripped.append(line)
return asm_stripped
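# Small illustrative run (hypothetical assembly lines): the label ..B1.3 is a jump
# target and is kept, while the unreferenced label ..B1.9 is blanked out.
_asm = ['..B1.3:        # loop head',
        '        addq    $8, %rax',
        '        jb      ..B1.3',
        '..B1.9:']
_asm = strip_and_uncomment(_asm)
_asm = strip_unreferenced_labels(_asm)
# _asm == ['..B1.3:', 'addq    $8, %rax', 'jb      ..B1.3', '']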
|
Find blocks probably corresponding to loops in assembly.
|
def find_asm_blocks(asm_lines):
"""Find blocks probably corresponding to loops in assembly."""
blocks = []
last_labels = OrderedDict()
packed_ctr = 0
avx_ctr = 0
xmm_references = []
ymm_references = []
zmm_references = []
gp_references = []
mem_references = []
increments = {}
for i, line in enumerate(asm_lines):
# Register access counts
zmm_references += re.findall('%zmm[0-9]+', line)
ymm_references += re.findall('%ymm[0-9]+', line)
xmm_references += re.findall('%xmm[0-9]+', line)
gp_references += re.findall('%r[a-z0-9]+', line)
if re.search(r'\d*\(%\w+(,%\w+)?(,\d)?\)', line):
m = re.search(r'(?P<off>[-]?\d*)\(%(?P<basep>\w+)(,%(?P<idx>\w+))?(?:,(?P<scale>\d))?\)'
r'(?P<eol>$)?',
line)
mem_references.append((
int(m.group('off')) if m.group('off') else 0,
m.group('basep'),
m.group('idx'),
int(m.group('scale')) if m.group('scale') else 1,
'load' if m.group('eol') is None else 'store'))
if re.match(r"^[v]?(mul|add|sub|div|fmadd(132|213|231)?)[h]?p[ds]", line):
if line.startswith('v'):
avx_ctr += 1
packed_ctr += 1
elif re.match(r'^\S+:', line):
# last_labels[label_name] = line_number
            last_labels[line[0:line.find(':')]] = i
# Reset counters
packed_ctr = 0
avx_ctr = 0
xmm_references = []
ymm_references = []
zmm_references = []
gp_references = []
mem_references = []
increments = {}
elif re.match(r'^inc[bwlq]?\s+%[a-z0-9]+', line):
reg_start = line.find('%') + 1
increments[line[reg_start:]] = 1
elif re.match(r'^add[bwlq]?\s+\$[0-9]+,\s*%[a-z0-9]+', line):
const_start = line.find('$') + 1
const_end = line[const_start + 1:].find(',') + const_start + 1
reg_start = line.find('%') + 1
increments[line[reg_start:]] = int(line[const_start:const_end])
elif re.match(r'^dec[bwlq]?', line):
reg_start = line.find('%') + 1
increments[line[reg_start:]] = -1
elif re.match(r'^sub[bwlq]?\s+\$[0-9]+,', line):
const_start = line.find('$') + 1
const_end = line[const_start + 1:].find(',') + const_start + 1
reg_start = line.find('%') + 1
increments[line[reg_start:]] = -int(line[const_start:const_end])
elif last_labels and re.match(r'^j[a-z]+\s+\S+\s*', line):
# End of block(s) due to jump
            # Check if jump target matches any previously recorded label
last_label = None
last_label_line = -1
for label_name, label_line in last_labels.items():
if re.match(r'^j[a-z]+\s+' + re.escape(label_name) + r'\s*', line):
# matched
last_label = label_name
last_label_line = label_line
labels = list(last_labels.keys())
if last_label:
# deduce loop increment from memory index register
pointer_increment = None # default -> can not decide, let user choose
possible_idx_regs = None
if mem_references:
# we found memory references to work with
# If store accesses exist, consider only those
store_references = [mref for mref in mem_references
if mref[4] == 'store']
refs = store_references or mem_references
possible_idx_regs = list(set(increments.keys()).intersection(
set([r[1] for r in refs if r[1] is not None] +
[r[2] for r in refs if r[2] is not None])))
for mref in refs:
for reg in list(possible_idx_regs):
# Only consider references with two registers, where one could be an index
if None not in mref[1:3]:
                                # One needs to match; other registers will be excluded
if not (reg == mref[1] or reg == mref[2]):
# reg can not be it
possible_idx_regs.remove(reg)
idx_reg = None
if len(possible_idx_regs) == 1:
# good, exactly one register was found
idx_reg = possible_idx_regs[0]
elif possible_idx_regs and itemsEqual([increments[pidxreg]
for pidxreg in possible_idx_regs]):
                        # multiple options were found, but all have the same increment
# use first match:
idx_reg = possible_idx_regs[0]
if idx_reg:
mem_scales = [mref[3] for mref in refs
if idx_reg == mref[2] or idx_reg == mref[1]]
if itemsEqual(mem_scales):
# good, all scales are equal
try:
pointer_increment = mem_scales[0] * increments[idx_reg]
except:
print("labels", pformat(labels[labels.index(last_label):]))
print("lines", pformat(asm_lines[last_label_line:i + 1]))
print("increments", increments)
print("mem_references", pformat(mem_references))
print("idx_reg", idx_reg)
print("mem_scales", mem_scales)
raise
blocks.append({'first_line': last_label_line,
'last_line': i,
'ops': i - last_label_line,
'labels': labels[labels.index(last_label):],
'packed_instr': packed_ctr,
'avx_instr': avx_ctr,
'XMM': (len(xmm_references), len(set(xmm_references))),
'YMM': (len(ymm_references), len(set(ymm_references))),
'ZMM': (len(zmm_references), len(set(zmm_references))),
'GP': (len(gp_references), len(set(gp_references))),
'regs': (len(xmm_references) + len(ymm_references) +
len(zmm_references) + len(gp_references),
len(set(xmm_references)) + len(set(ymm_references)) +
len(set(zmm_references)) +
len(set(gp_references))),
'pointer_increment': pointer_increment,
'lines': asm_lines[last_label_line:i + 1],
'possible_idx_regs': possible_idx_regs,
'mem_references': mem_references,
'increments': increments, })
# Reset counters
packed_ctr = 0
avx_ctr = 0
xmm_references = []
ymm_references = []
zmm_references = []
gp_references = []
mem_references = []
increments = {}
last_labels = OrderedDict()
return list(enumerate(blocks))
|
Return best block selected based on simple heuristic.
|
def select_best_block(blocks):
"""Return best block selected based on simple heuristic."""
# TODO make this cleverer with more stats
if not blocks:
raise ValueError("No suitable blocks were found in assembly.")
best_block = max(blocks, key=lambda b: b[1]['packed_instr'])
if best_block[1]['packed_instr'] == 0:
best_block = max(blocks,
key=lambda b: (b[1]['ops'] + b[1]['packed_instr'] + b[1]['avx_instr'],
b[1]['ZMM'], b[1]['YMM'], b[1]['XMM']))
return best_block[0]
|
Let user interactively select byte increment.
|
def userselect_increment(block):
"""Let user interactively select byte increment."""
print("Selected block:")
print('\n ' + ('\n '.join(block['lines'])))
print()
increment = None
while increment is None:
increment = input("Choose store pointer increment (number of bytes): ")
try:
increment = int(increment)
except ValueError:
increment = None
block['pointer_increment'] = increment
return increment
|
Let user interactively select block.
|
def userselect_block(blocks, default=None, debug=False):
"""Let user interactively select block."""
print("Blocks found in assembly file:")
print(" block | OPs | pck. | AVX || Registers | ZMM | YMM | XMM | GP ||ptr.inc|\n"
"----------------+-----+------+-----++-----------+----------+----------+----------+---------++-------|")
for idx, b in blocks:
print('{:>2} {b[labels]!r:>12} | {b[ops]:>3} | {b[packed_instr]:>4} | {b[avx_instr]:>3} |'
'| {b[regs][0]:>3} ({b[regs][1]:>3}) | {b[ZMM][0]:>3} ({b[ZMM][1]:>2}) | '
'{b[YMM][0]:>3} ({b[YMM][1]:>2}) | '
'{b[XMM][0]:>3} ({b[XMM][1]:>2}) | {b[GP][0]:>2} ({b[GP][1]:>2}) || '
'{b[pointer_increment]!s:>5} |'.format(idx, b=b))
if debug:
ln = b['first_line']
print(' '*4 + 'Code:')
for l in b['lines']:
print(' '*8 + '{:>5} | {}'.format(ln, l))
ln += 1
print(' '*4 + 'Metadata:')
print(textwrap.indent(
pformat({k: v for k,v in b.items() if k not in ['lines']}),
' '*8))
# Let user select block:
block_idx = -1
while not (0 <= block_idx < len(blocks)):
block_idx = input("Choose block to be marked [" + str(default) + "]: ") or default
try:
block_idx = int(block_idx)
except ValueError:
block_idx = -1
# block = blocks[block_idx][1]
return block_idx
|
Insert IACA marker into list of ASM instructions at given indices.
|
def insert_markers(asm_lines, start_line, end_line):
"""Insert IACA marker into list of ASM instructions at given indices."""
asm_lines = (asm_lines[:start_line] + START_MARKER +
asm_lines[start_line:end_line + 1] + END_MARKER +
asm_lines[end_line + 1:])
return asm_lines
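# Minimal sketch: wrap lines 2..4 (0-based, inclusive) of an assembly listing in the
# module-level START_MARKER / END_MARKER sequences expected by IACA.
_lines = ['a\n', 'b\n', 'c\n', 'd\n', 'e\n', 'f\n']
_marked = insert_markers(_lines, 2, 4)
# _marked == _lines[:2] + START_MARKER + _lines[2:5] + END_MARKER + _lines[5:]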
|
Add IACA markers to an assembly file.
|
def iaca_instrumentation(input_file, output_file,
block_selection='auto',
pointer_increment='auto_with_manual_fallback',
debug=False):
"""
Add IACA markers to an assembly file.
    If instrumentation fails because the loop increment could not be determined automatically
    (and no manual fallback is requested), a RuntimeError is raised.
:param input_file: file-like object to read from
:param output_file: file-like object to write to
    :param block_selection: index of the assembly block to instrument, or 'auto' for automatically
                            using the block with the most vector instructions, or 'manual' to
                            prompt the user for a selection
:param pointer_increment: number of bytes the pointer is incremented after the loop or
- 'auto': automatic detection, otherwise RuntimeError is raised
- 'auto_with_manual_fallback': like auto with fallback to manual input
- 'manual': prompt user
:param debug: output additional internal analysis information. Only works with manual selection.
:return: the instrumented assembly block
"""
assembly_orig = input_file.readlines()
# If input and output files are the same, overwrite with output
if input_file is output_file:
output_file.seek(0)
output_file.truncate()
if debug:
block_selection = 'manual'
assembly = strip_and_uncomment(copy(assembly_orig))
assembly = strip_unreferenced_labels(assembly)
blocks = find_asm_blocks(assembly)
if block_selection == 'auto':
block_idx = select_best_block(blocks)
elif block_selection == 'manual':
block_idx = userselect_block(blocks, default=select_best_block(blocks), debug=debug)
elif isinstance(block_selection, int):
block_idx = block_selection
else:
raise ValueError("block_selection has to be an integer, 'auto' or 'manual' ")
block = blocks[block_idx][1]
if pointer_increment == 'auto':
if block['pointer_increment'] is None:
raise RuntimeError("pointer_increment could not be detected automatically. Use "
"--pointer-increment to set manually to byte offset of store "
"pointer address between consecutive assembly block iterations.")
elif pointer_increment == 'auto_with_manual_fallback':
if block['pointer_increment'] is None:
block['pointer_increment'] = userselect_increment(block)
elif pointer_increment == 'manual':
block['pointer_increment'] = userselect_increment(block)
elif isinstance(pointer_increment, int):
block['pointer_increment'] = pointer_increment
else:
raise ValueError("pointer_increment has to be an integer, 'auto', 'manual' or "
"'auto_with_manual_fallback' ")
instrumented_asm = insert_markers(assembly_orig, block['first_line'], block['last_line'])
output_file.writelines(instrumented_asm)
return block
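# Hedged usage sketch (the file name is hypothetical): instrument an assembly file
# in place; opening it 'r+' makes input and output the same file object, so it is
# truncated and rewritten with the markers inserted.
with open('kernel.s', 'r+') as f:
    marked_block = iaca_instrumentation(f, f, block_selection='auto',
                                        pointer_increment='auto_with_manual_fallback')
print(marked_block['pointer_increment'])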
|
Run IACA analysis on an instrumented binary.
|
def iaca_analyse_instrumented_binary(instrumented_binary_file, micro_architecture):
"""
Run IACA analysis on an instrumented binary.
:param instrumented_binary_file: path of binary that was built with IACA markers
:param micro_architecture: micro architecture string as taken by IACA.
one of: NHM, WSM, SNB, IVB, HSW, BDW
:return: a dictionary with the following keys:
- 'output': the output of the iaca executable
- 'throughput': the block throughput in cycles for one possibly vectorized loop iteration
- 'port cycles': dict, mapping port name to number of active cycles
- 'uops': total number of Uops
"""
# Select IACA version and executable based on micro_architecture:
arch_map = {
# arch: (binary name, version string, required additional arguments)
'NHM': ('iaca2.2', 'v2.2', ['-64']),
'WSM': ('iaca2.2', 'v2.2', ['-64']),
'SNB': ('iaca2.3', 'v2.3', ['-64']),
'IVB': ('iaca2.3', 'v2.3', ['-64']),
'HSW': ('iaca3.0', 'v3.0', []),
'BDW': ('iaca3.0', 'v3.0', []),
'SKL': ('iaca3.0', 'v3.0', []),
'SKX': ('iaca3.0', 'v3.0', []),
}
if micro_architecture not in arch_map:
raise ValueError('Invalid micro_architecture selected ({}), valid options are {}'.format(
micro_architecture, ', '.join(arch_map.keys())))
iaca_path = iaca_get.find_iaca() # Throws exception if not found
os.environ['PATH'] += ':' + iaca_path
iaca_exec, iaca_version, base_args = arch_map[micro_architecture]
if find_executable(iaca_exec) is None:
raise RuntimeError("{0} executable was not found. Make sure that {0} is found in "
"{1}. Install using iaca_get.".format(iaca_exec, iaca_path))
result = {}
cmd = [iaca_exec] + base_args + ['-arch', micro_architecture, instrumented_binary_file]
try:
iaca_output = subprocess.check_output(cmd).decode('utf-8')
result['output'] = iaca_output
except OSError as e:
raise RuntimeError("IACA execution failed:" + ' '.join(cmd) + '\n' + str(e))
except subprocess.CalledProcessError as e:
raise RuntimeError("IACA throughput analysis failed:" + str(e))
# Get total cycles per loop iteration
match = re.search(r'^Block Throughput: ([0-9.]+) Cycles', iaca_output, re.MULTILINE)
assert match, "Could not find Block Throughput in IACA output."
throughput = float(match.groups()[0])
result['throughput'] = throughput
# Find ports and cycles per port
ports = [l for l in iaca_output.split('\n') if l.startswith('| Port |')]
cycles = [l for l in iaca_output.split('\n') if l.startswith('| Cycles |')]
assert ports and cycles, "Could not find ports/cycles lines in IACA output."
ports = [p.strip() for p in ports[0].split('|')][2:]
cycles = [c.strip() for c in cycles[0].split('|')][2:]
port_cycles = []
for i in range(len(ports)):
if '-' in ports[i] and ' ' in cycles[i]:
subports = [p.strip() for p in ports[i].split('-')]
subcycles = [c for c in cycles[i].split(' ') if bool(c)]
port_cycles.append((subports[0], float(subcycles[0])))
port_cycles.append((subports[0] + subports[1], float(subcycles[1])))
elif ports[i] and cycles[i]:
port_cycles.append((ports[i], float(cycles[i])))
result['port cycles'] = dict(port_cycles)
match = re.search(r'^Total Num Of Uops: ([0-9]+)', iaca_output, re.MULTILINE)
assert match, "Could not find Uops in IACA output."
result['uops'] = float(match.groups()[0])
return result
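# Hedged usage sketch (binary path is hypothetical): requires an IACA installation
# that iaca_get.find_iaca() can locate and a binary built from marked assembly.
res = iaca_analyse_instrumented_binary('kernel.marked', micro_architecture='HSW')
print(res['throughput'], 'cycles per (possibly vectorized) loop iteration')
print(res['port cycles'], res['uops'])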
|
Execute command line interface.
|
def main():
"""Execute command line interface."""
parser = argparse.ArgumentParser(
description='Find and analyze basic loop blocks and mark for IACA.',
epilog='For help, examples, documentation and bug reports go to:\nhttps://github.com'
'/RRZE-HPC/kerncraft\nLicense: AGPLv3')
parser.add_argument('--version', action='version', version='%(prog)s {}'.format(__version__))
parser.add_argument('source', type=argparse.FileType(), nargs='?', default=sys.stdin,
help='assembly file to analyze (default: stdin)')
parser.add_argument('--outfile', '-o', type=argparse.FileType('w'), nargs='?',
default=sys.stdout, help='output file location (default: stdout)')
parser.add_argument('--debug', action='store_true',
                        help='Output internal analysis information for debugging.')
args = parser.parse_args()
    # pointer_increment is given, since it makes no difference on the command line and requires
# less user input
iaca_instrumentation(input_file=args.source, output_file=args.outfile,
block_selection='manual', pointer_increment=1, debug=args.debug)
|
Set up and execute the model with the given blocking length.
|
def simulate(kernel, model, define_dict, blocking_constant, blocking_length):
"""Setup and execute model with given blocking length"""
kernel.clear_state()
# Add constants from define arguments
for k, v in define_dict.items():
kernel.set_constant(k, v)
kernel.set_constant(blocking_constant, blocking_length)
model.analyze()
return sum([cy for dscr, cy in model.results['cycles']])
|
Return the largest prefix where the relative error is below *max_error*, although rounded by *round_length*.
|
def good_prefix(self, max_error=0.01, round_length=2, min_prefix='', max_prefix=None):
"""
    Return the largest prefix where the relative error is below *max_error*, although rounded
    by *round_length*.
    If *max_prefix* is found in PrefixedUnit.PREFIXES, the returned value will not exceed this
    prefix.
    If *min_prefix* is given, the returned value will at least be of that prefix (no matter the
    error).
"""
good_prefix = min_prefix
base_value = self.base_value()
for k, v in list(self.PREFIXES.items()):
            # Ignore too large prefixes
if max_prefix is not None and v > self.PREFIXES[max_prefix]:
continue
            # Check that the difference is below the relative error *max_error*
if abs(round(base_value/v, round_length)*v - base_value) > base_value*max_error:
continue
# Check that resulting number is >= 0.9
if abs(round(base_value/v, round_length)) < 0.9:
continue
            # Check if prefix is larger than the one already chosen
if v < self.PREFIXES[good_prefix]:
continue
# seems to be okay
good_prefix = k
return good_prefix
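# Usage sketch: pick a readable prefix for a raw bandwidth value. For 12.34e9 B/s
# the 'G' prefix keeps the rounding error below the default 1%, so 'G' is the
# expected (most likely) result.
bw = PrefixedUnit(12.34e9, 'B/s')
print(bw.good_prefix())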
|
Return list of evenly spaced integers over an interval.
|
def space(start, stop, num, endpoint=True, log=False, base=10):
"""
Return list of evenly spaced integers over an interval.
Numbers can either be evenly distributed in a linear space (if *log* is False) or in a log
space (if *log* is True). If *log* is True, base is used to define the log space basis.
    If *endpoint* is True, *stop* will be the last returned value, as long as *num* >= 2.
"""
assert type(start) is int and type(stop) is int and type(num) is int, \
"start, stop and num need to be intergers"
assert num >= 2, "num has to be atleast 2"
if log:
start = math.log(start, base)
stop = math.log(stop, base)
if endpoint:
step_length = float((stop - start)) / float(num - 1)
else:
step_length = float((stop - start)) / float(num)
i = 0
while i < num:
if log:
yield int(round(base ** (start + i * step_length)))
else:
yield int(round(start + i * step_length))
i += 1
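# Quick self-checks (verifiable by hand): linear and logarithmic spacing.
assert list(space(0, 100, 5)) == [0, 25, 50, 75, 100]
assert list(space(1, 1000, 4, log=True, base=10)) == [1, 10, 100, 1000]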
|
Return datetime object of latest change in kerncraft module directory.
|
def get_last_modified_datetime(dir_path=os.path.dirname(__file__)):
"""Return datetime object of latest change in kerncraft module directory."""
max_mtime = 0
for root, dirs, files in os.walk(dir_path):
for f in files:
p = os.path.join(root, f)
try:
max_mtime = max(max_mtime, os.stat(p).st_mtime)
except FileNotFoundError:
pass
return datetime.utcfromtimestamp(max_mtime)
|
Return argparse parser.
|
def create_parser():
"""Return argparse parser."""
parser = argparse.ArgumentParser(
description='Analytical performance modelling and benchmarking toolkit.',
epilog='For help, examples, documentation and bug reports go to:\nhttps://github.com'
'/RRZE-HPC/kerncraft\nLicense: AGPLv3')
parser.add_argument('--version', action='version', version='%(prog)s {}'.format(__version__))
parser.add_argument('--machine', '-m', type=argparse.FileType('r'), required=True,
help='Path to machine description yaml file.')
parser.add_argument('--pmodel', '-p', choices=models.__all__, required=True, action='append',
default=[], help='Performance model to apply')
parser.add_argument('-D', '--define', nargs=2, metavar=('KEY', 'VALUE'), default=[],
action=AppendStringRange,
help='Define constant to be used in C code. Values must be integer or '
'match start-stop[:num[log[base]]]. If range is given, all '
                             'permutations will be tested. Overwrites constants from testcase '
'file.')
parser.add_argument('--verbose', '-v', action='count', default=0,
help='Increases verbosity level.')
parser.add_argument('code_file', metavar='FILE', type=argparse.FileType(),
help='File with loop kernel C code')
parser.add_argument('--asm-block', metavar='BLOCK', default='auto',
help='Number of ASM block to mark for IACA, "auto" for automatic '
'selection or "manual" for interactiv selection.')
parser.add_argument('--pointer-increment', metavar='INCR', default='auto', type=int_or_str,
help='Increment of store pointer within one ASM block in bytes. If "auto": '
'automatic detection, error on failure to detect, if '
'"auto_with_manual_fallback": fallback to manual input, or if '
'"manual": always prompt user.')
parser.add_argument('--store', metavar='PICKLE', type=argparse.FileType('a+b'),
                        help='Adds results to PICKLE file for later processing.')
parser.add_argument('--unit', '-u', choices=['cy/CL', 'cy/It', 'It/s', 'FLOP/s'],
help='Select the output unit, defaults to model specific if not given.')
parser.add_argument('--cores', '-c', metavar='CORES', type=int, default=1,
help='Number of cores to be used in parallel. (default: 1) '
'ECM model will consider the scaling of the last level cache and '
'predict the overall performance in addition to single-core behavior. '
                             'The benchmark mode will run the code with OpenMP on that many '
                             'physical cores.')
parser.add_argument('--kernel-description', action='store_true',
help='Use kernel description instead of analyzing the kernel code.')
parser.add_argument('--clean-intermediates', action='store_true',
help='If set, will delete all intermediate files after completion.')
# Needed for ECM, ECMData and Roofline model:
parser.add_argument('--cache-predictor', '-P', choices=['LC', 'SIM'], default='SIM',
help='Change cache predictor to use, options are LC (layer conditions) and '
'SIM (cache simulation with pycachesim), default is SIM.')
# Needed for ECM, RooflineIACA and Benchmark model:
parser.add_argument('--compiler', '-C', type=str, default=None,
help='Compiler to use, default is first in machine description file.')
parser.add_argument('--compiler-flags', type=str, default=None,
help='Compiler flags to use. If not set, flags are taken from machine '
'description file (-std=c99 is always added).')
for m in models.__all__:
ag = parser.add_argument_group('arguments for ' + m + ' model', getattr(models, m).name)
getattr(models, m).configure_arggroup(ag)
return parser
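# Hedged usage sketch (file names are hypothetical and must exist, since argparse
# opens them): parse a typical ECM invocation with one constant defined.
parser = create_parser()
args = parser.parse_args(['-m', 'machine.yml', '-p', 'ECM',
                          '-D', 'N', '1000', 'kernel.c'])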
|