Quality function using muparser to generate a new Quality for every vertex. It is possible to use the following per-vertex variables in the expression: x, y, z, nx, ny, nz (normal), r, g, b (color), q (quality), rad, vi, and all custom vertex attributes already defined by the user.
def vq_function(script, function='vi', normalize=False, color=False): """:Quality function using muparser to generate new Quality for every vertex<br>It's possibile to use the following per-vertex variables in the expression:<br>x, y, z, nx, ny, nz (normal), r, g, b (color), q (quality), rad, vi, <br>and all custom <i>vertex attributes</i> already defined by user. function function to generate new Quality for every vertex normalize if checked normalize all quality values in range [0..1] color if checked map quality generated values into per-vertex color """ filter_xml = ''.join([ ' <filter name="Per Vertex Quality Function">\n', ' <Param name="q" ', 'value="{}" '.format(str(function).replace('&', '&amp;').replace('<', '&lt;')), 'description="func q = " ', 'type="RichString" ', '/>\n', ' <Param name="normalize" ', 'value="{}" '.format(str(normalize).lower()), 'description="normalize" ', 'type="RichBool" ', '/>\n', ' <Param name="map" ', 'value="{}" '.format(str(color).lower()), 'description="map into color" ', 'type="RichBool" ', '/>\n', ' </filter>\n']) util.write_filter(script, filter_xml) return None
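A minimal usage sketch follows, assuming meshlabxml is installed and importable as mlx, meshlabserver is on the PATH, and the file names are hypothetical; it stores each vertex's radial distance in the quality field and maps it to color.

import meshlabxml as mlx

# Hypothetical input/output meshes; any format meshlabserver can read works.
script = mlx.FilterScript(file_in='quality_in.ply', file_out='quality_out.ply',
                          ml_version='2016.12')
# Store sqrt(x*x + y*y) (distance from the z axis) as per-vertex quality,
# normalize it to [0..1] and map it to per-vertex color.
mlx.mp_func.vq_function(script, function='sqrt(x*x + y*y)', normalize=True,
                        color=True)
# Request vertex color and quality in the output mask so the values are saved.
script.run_script(output_mask='-m vc vq')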
Rainbow-colored Voronoi quatrefoil (3,4) torus knot
def quatrefoil(): """ Rainbow colored voronoi quatrefoil (3,4) torus knot """ start_time = time.time() os.chdir(THIS_SCRIPTPATH) #ml_version = '1.3.4BETA' ml_version = '2016.12' # Add meshlabserver directory to OS PATH; omit this if it is already in # your PATH meshlabserver_path = 'C:\\Program Files\\VCG\\MeshLab' """ if ml_version is '1.3.4BETA': meshlabserver_path = 'C:\\Program Files\\VCG\\MeshLab' elif ml_version is '2016.12': meshlabserver_path = 'C:\\Program Files\\VCG\\MeshLab_2016_12' """ os.environ['PATH'] = meshlabserver_path + os.pathsep + os.environ['PATH'] # Cross section parameters length = math.radians(360) tube_size = [10, 10, length] segments = [64, 64, 720*2] inner_radius = 2.0 # Sinusoidal deformation parametera amplitude = 4.2 freq = 4 phase = 270 center = 'r' start_pt = 0 increment = 'z-{}'.format(start_pt) # Cyclic rainbow color parameters c_start_pt = 0 c_freq = 5 c_phase_shift = 0 #90 #300 c_phase = (0 + c_phase_shift, 120 + c_phase_shift, 240 + c_phase_shift, 0) # Voronoi surface parameters holes = [2, 2, 44] # Number of holes in each axis; x are sides, y is outside web_thickness = 0.5 solid_radius = 5.0 # If the mesh is smaller than this radius the holes will be closed faces_surface = 50000 # Voronoi solid parameters voxel = 0.5 thickness = 2.5 faces_solid = 200000 # Scaling parameters size = 75 # desired max size of the curve curve_max_size = 2*(1 + 1.5) # the 1.5 s/b inner_radius, but am keepng current scaling scale = (size-2*(thickness + amplitude) - tube_size[1])/curve_max_size # File names file_color = 'quatrefoil_color.ply' file_voronoi_surf = 'quatrefoil_voronoi_surf.ply' file_voronoi_solid = 'quatrefoil_voronoi_solid.ply' file_voronoi_color = 'quatrefoil_voronoi_final.ply' # Create FilterScript objects for each step in the process quatrefoil_color = mlx.FilterScript( file_in=None, file_out=file_color, ml_version=ml_version) quatrefoil_voronoi_surf = mlx.FilterScript( file_in=file_color, file_out=file_voronoi_surf, ml_version=ml_version) quatrefoil_voronoi_solid = mlx.FilterScript( file_in=file_voronoi_surf, file_out=file_voronoi_solid, ml_version=ml_version) quatrefoil_voronoi_color = mlx.FilterScript( file_in=[file_color, file_voronoi_solid], file_out=file_voronoi_color, ml_version=ml_version) print('\n Create colored quatrefoil curve ...') mlx.create.cube_open_hires( quatrefoil_color, size=tube_size, x_segments=segments[0], y_segments=segments[1], z_segments=segments[2], center=True) mlx.transform.translate(quatrefoil_color, [0, 0, length/2]) # Sinusoidal deformation r_func = '({a})*sin(({f})*({i}) + ({p})) + ({c})'.format( f=freq, i=increment, p=math.radians(phase), a=amplitude, c=center) mlx.transform.function_cyl_co( quatrefoil_color, r_func=r_func, theta_func='theta', z_func='z') # Save max radius in quality field so that we can save it with the file # for use in the next step max_radius = math.sqrt((tube_size[0]/2)**2+(tube_size[1]/2)**2) # at corners q_func = '({a})*sin(({f})*({i}) + ({p})) + ({c})'.format( f=freq, i=increment, p=math.radians(phase), a=amplitude, c=max_radius) mlx.mp_func.vq_function(quatrefoil_color, function=q_func) # Apply rainbow vertex colors mlx.vert_color.cyclic_rainbow( quatrefoil_color, direction='z', start_pt=c_start_pt, amplitude=255 / 2, center=255 / 2, freq=c_freq, phase=c_phase) # Deform mesh to quatrefoil curve. 
Merge vertices after, which # will weld the ends together so it becomes watertight quatrefoil_func = mlx.transform.deform2curve( quatrefoil_color, curve=mlx.mp_func.torus_knot('t', p=3, q=4, scale=scale, radius=inner_radius)) mlx.clean.merge_vert(quatrefoil_color, threshold=0.0001) # Run script mlx.layers.delete_lower(quatrefoil_color) quatrefoil_color.run_script(output_mask='-m vc vq') print('\n Create Voronoi surface ...') # Move quality value into radius attribute mlx.mp_func.vert_attr(quatrefoil_voronoi_surf, name='radius', function='q') # Create seed vertices # For grid style holes, we will create a mesh similar to the original # but with fewer vertices. mlx.create.cube_open_hires( quatrefoil_voronoi_surf, size=tube_size, x_segments=holes[0]+1, y_segments=holes[1]+1, z_segments=holes[2]+1, center=True) mlx.select.all(quatrefoil_voronoi_surf, vert=False) mlx.delete.selected(quatrefoil_voronoi_surf, vert=False) mlx.select.cylindrical_vert(quatrefoil_voronoi_surf, radius=max_radius-0.0001, inside=False) mlx.transform.translate(quatrefoil_voronoi_surf, [0, 0, 20]) mlx.delete.selected(quatrefoil_voronoi_surf, face=False) mlx.transform.function_cyl_co(quatrefoil_voronoi_surf, r_func=r_func, theta_func='theta', z_func='z') mlx.transform.vert_function( quatrefoil_voronoi_surf, x_func=quatrefoil_func[0], y_func=quatrefoil_func[1], z_func=quatrefoil_func[2]) mlx.layers.change(quatrefoil_voronoi_surf, 0) mlx.vert_color.voronoi(quatrefoil_voronoi_surf) if quatrefoil_voronoi_surf.ml_version == '1.3.4BETA': sel_func = '(q <= {}) or ((radius)<={})'.format(web_thickness, solid_radius) else: sel_func = '(q <= {}) || ((radius)<={})'.format(web_thickness, solid_radius) mlx.select.vert_function(quatrefoil_voronoi_surf, function=sel_func) #mlx.select.face_function(quatrefoil_voronoi_surf, function='(vsel0 && vsel1 && vsel2)') mlx.select.invert(quatrefoil_voronoi_surf, face=False) mlx.delete.selected(quatrefoil_voronoi_surf, face=False) mlx.smooth.laplacian(quatrefoil_voronoi_surf, iterations=3) mlx.remesh.simplify(quatrefoil_voronoi_surf, texture=False, faces=faces_surface) mlx.layers.delete_lower(quatrefoil_voronoi_surf) #quatrefoil_voronoi_surf.save_to_file('temp_script.mlx') quatrefoil_voronoi_surf.run_script(script_file=None, output_mask='-m vc vq') print('\n Solidify Voronoi surface ...') mlx.remesh.uniform_resampling(quatrefoil_voronoi_solid, voxel=voxel, offset=thickness/2, thicken=True) mlx.layers.delete_lower(quatrefoil_voronoi_solid) quatrefoil_voronoi_solid.run_script() print('\n Clean up & transfer color to final model ...') # Clean up from uniform mesh resamplng mlx.delete.small_parts(quatrefoil_voronoi_color) mlx.delete.unreferenced_vert(quatrefoil_voronoi_color) mlx.delete.faces_from_nonmanifold_edges(quatrefoil_voronoi_color) mlx.clean.split_vert_on_nonmanifold_face(quatrefoil_voronoi_color) mlx.clean.close_holes(quatrefoil_voronoi_color) # Simplify (to improve triangulation quality), refine, & smooth mlx.remesh.simplify(quatrefoil_voronoi_color, texture=False, faces=faces_solid) mlx.subdivide.ls3loop(quatrefoil_voronoi_color, iterations=1) mlx.smooth.laplacian(quatrefoil_voronoi_color, iterations=3) # Transfer colors from original curve mlx.transfer.vert_attr_2_meshes( quatrefoil_voronoi_color, source_mesh=0, target_mesh=1, color=True, max_distance=7) mlx.layers.delete_lower(quatrefoil_voronoi_color) quatrefoil_voronoi_color.run_script(script_file=None) print(' done! Took %.1f sec' % (time.time() - start_time)) return None
Invert faces orientation, flipping the normals of the mesh.
def flip(script, force_flip=False, selected=False): """ Invert faces orientation, flipping the normals of the mesh. If requested, it tries to guess the right orientation; mainly it decides to flip all the faces if the minimum/maximum vertexes have not outward point normals for a few directions. Works well for single component watertight objects. Args: script: the FilterScript object or script filename to write the filter to. force_flip (bool): If selected, the normals will always be flipped; otherwise, the filter tries to set them outside. selected (bool): If selected, only selected faces will be affected. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA """ filter_xml = ''.join([ ' <filter name="Invert Faces Orientation">\n', ' <Param name="forceFlip" ', 'value="{}" '.format(str(force_flip).lower()), 'description="Force Flip" ', 'type="RichBool" ', '/>\n', ' <Param name="onlySelected" ', 'value="{}" '.format(str(selected).lower()), 'description="Flip only selected faces" ', 'type="RichBool" ', '/>\n', ' </filter>\n']) util.write_filter(script, filter_xml) return None
Compute the normals of the vertices of a mesh without exploiting the triangle connectivity; useful for datasets with no faces.
def point_sets(script, neighbors=10, smooth_iteration=0, flip=False, viewpoint_pos=(0.0, 0.0, 0.0)): """ Compute the normals of the vertices of a mesh without exploiting the triangle connectivity, useful for dataset with no faces. Args: script: the FilterScript object or script filename to write the filter to. neighbors (int): The number of neighbors used to estimate normals. smooth_iteration (int): The number of smoothing iteration done on the p used to estimate and propagate normals. flip (bool): Flip normals w.r.t. viewpoint. If the 'viewpoint' (i.e. scanner position) is known, it can be used to disambiguate normals orientation, so that all the normals will be oriented in the same direction. viewpoint_pos (single xyz point, tuple or list): Set the x, y, z coordinates of the viewpoint position. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA """ filter_xml = ''.join([ ' <filter name="Compute normals for point sets">\n', ' <Param name="K" ', 'value="{:d}" '.format(neighbors), 'description="Neighbour num" ', 'type="RichInt" ', '/>\n', ' <Param name="smoothIter" ', 'value="{:d}" '.format(smooth_iteration), 'description="Smooth Iteration" ', 'type="RichInt" ', '/>\n', ' <Param name="flipFlag" ', 'value="{}" '.format(str(flip).lower()), 'description="Flip normals w.r.t. viewpoint" ', 'type="RichBool" ', '/>\n', ' <Param name="viewPos" ', 'x="{}" y="{}" z="{}" '.format(viewpoint_pos[0], viewpoint_pos[1], viewpoint_pos[2],), 'description="Viewpoint Pos." ', 'type="RichPoint3f" ', '/>\n', ' </filter>\n']) util.write_filter(script, filter_xml) return None
Laplacian smooth of the mesh: for each vertex it calculates the average position with the nearest vertices.
def laplacian(script, iterations=1, boundary=True, cotangent_weight=True, selected=False): """ Laplacian smooth of the mesh: for each vertex it calculates the average position with nearest vertex Args: script: the FilterScript object or script filename to write the filter to. iterations (int): The number of times that the whole algorithm (normal smoothing + vertex fitting) is iterated. boundary (bool): If true the boundary edges are smoothed only by themselves (e.g. the polyline forming the boundary of the mesh is independently smoothed). Can reduce the shrinking on the border but can have strange effects on very small boundaries. cotangent_weight (bool): If True the cotangent weighting scheme is computed for the averaging of the position. Otherwise (False) the simpler umbrella scheme (1 if the edge is present) is used. selected (bool): If selected the filter is performed only on the selected faces Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA """ filter_xml = ''.join([ ' <filter name="Laplacian Smooth">\n', ' <Param name="stepSmoothNum" ', 'value="{:d}" '.format(iterations), 'description="Smoothing steps" ', 'type="RichInt" ', '/>\n', ' <Param name="Boundary" ', 'value="{}" '.format(str(boundary).lower()), 'description="1D Boundary Smoothing" ', 'type="RichBool" ', '/>\n', ' <Param name="cotangentWeight" ', 'value="{}" '.format(str(cotangent_weight).lower()), 'description="Cotangent weighting" ', 'type="RichBool" ', '/>\n', ' <Param name="Selected" ', 'value="{}" '.format(str(selected).lower()), 'description="Affect only selected faces" ', 'type="RichBool" ', '/>\n', ' </filter>\n']) util.write_filter(script, filter_xml) return None
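A short usage sketch, assuming meshlabxml is importable as mlx, meshlabserver is on the PATH, and the file names are hypothetical; it runs three Laplacian iterations over the whole mesh.

import meshlabxml as mlx

# Hypothetical noisy input mesh and smoothed output.
script = mlx.FilterScript(file_in='noisy.ply', file_out='smoothed.ply',
                          ml_version='2016.12')
# Three iterations with cotangent weighting; boundary edges are smoothed
# independently to limit shrinking at the border.
mlx.smooth.laplacian(script, iterations=3, boundary=True,
                     cotangent_weight=True, selected=False)
script.run_script()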
The lambda & mu Taubin smoothing makes two steps of smoothing, forth and back, for each iteration.
def taubin(script, iterations=10, t_lambda=0.5, t_mu=-0.53, selected=False): """ The lambda & mu Taubin smoothing, it make two steps of smoothing, forth and back, for each iteration. Based on: Gabriel Taubin "A signal processing approach to fair surface design" Siggraph 1995 Args: script: the FilterScript object or script filename to write the filter to. iterations (int): The number of times that the taubin smoothing is iterated. Usually it requires a larger number of iteration than the classical laplacian. t_lambda (float): The lambda parameter of the Taubin Smoothing algorithm t_mu (float): The mu parameter of the Taubin Smoothing algorithm selected (bool): If selected the filter is performed only on the selected faces Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA """ filter_xml = ''.join([ ' <filter name="Taubin Smooth">\n', ' <Param name="lambda" ', 'value="{}" '.format(t_lambda), 'description="Lambda" ', 'type="RichFloat" ', '/>\n', ' <Param name="mu" ', 'value="{}" '.format(t_mu), 'description="mu" ', 'type="RichFloat" ', '/>\n', ' <Param name="stepSmoothNum" ', 'value="{:d}" '.format(iterations), 'description="Smoothing steps" ', 'type="RichInt" ', '/>\n', ' <Param name="Selected" ', 'value="{}" '.format(str(selected).lower()), 'description="Affect only selected faces" ', 'type="RichBool" ', '/>\n', ' </filter>\n']) util.write_filter(script, filter_xml) return None
Two Step Smoothing, a feature-preserving/enhancing fairing filter.
def twostep(script, iterations=3, angle_threshold=60, normal_steps=20, fit_steps=20, selected=False): """ Two Step Smoothing, a feature preserving/enhancing fairing filter. It is based on a Normal Smoothing step where similar normals are averaged together and a step where the vertexes are fitted on the new normals. Based on: A. Belyaev and Y. Ohtake, "A Comparison of Mesh Smoothing Methods" Proc. Israel-Korea Bi-National Conf. Geometric Modeling and Computer Graphics, pp. 83-87, 2003. Args: script: the FilterScript object or script filename to write the filter to. iterations (int): The number of times that the whole algorithm (normal smoothing + vertex fitting) is iterated. angle_threshold (float): Specify a threshold angle (0..90) for features that you want to be preserved. Features forming angles LARGER than the specified threshold will be preserved. 0 -> no smoothing 90 -> all faces will be smoothed normal_steps (int): Number of iterations of normal smoothing step. The larger the better and (the slower) fit_steps (int): Number of iterations of the vertex fitting procedure selected (bool): If selected the filter is performed only on the selected faces Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA """ filter_xml = ''.join([ ' <filter name="TwoStep Smooth">\n', ' <Param name="stepSmoothNum" ', 'value="{:d}" '.format(iterations), 'description="Smoothing steps" ', 'type="RichInt" ', '/>\n', ' <Param name="normalThr" ', 'value="{}" '.format(angle_threshold), 'description="Feature Angle Threshold (deg)" ', 'type="RichFloat" ', '/>\n', ' <Param name="stepNormalNum" ', 'value="{:d}" '.format(normal_steps), 'description="Normal Smoothing steps" ', 'type="RichInt" ', '/>\n', ' <Param name="stepFitNum" ', 'value="{:d}" '.format(fit_steps), 'description="Vertex Fitting steps" ', 'type="RichInt" ', '/>\n', ' <Param name="Selected" ', 'value="{}" '.format(str(selected).lower()), 'description="Affect only selected faces" ', 'type="RichBool" ', '/>\n', ' </filter>\n']) util.write_filter(script, filter_xml) return None
A Laplacian smooth that is constrained to move vertices only along the view direction.
def depth(script, iterations=3, viewpoint=(0, 0, 0), selected=False): """ A laplacian smooth that is constrained to move vertices only along the view direction. Args: script: the FilterScript object or script filename to write the filter to. iterations (int): The number of times that the whole algorithm (normal smoothing + vertex fitting) is iterated. viewpoint (vector tuple or list): The position of the view point that is used to get the constraint direction. selected (bool): If selected the filter is performed only on the selected faces Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA """ filter_xml = ''.join([ ' <filter name="Depth Smooth">\n', ' <Param name="stepSmoothNum" ', 'value="{:d}" '.format(iterations), 'description="Smoothing steps" ', 'type="RichInt" ', '/>\n', ' <Param name="viewPoint" ', 'x="{}" '.format(viewpoint[0]), 'y="{}" '.format(viewpoint[1]), 'z="{}" '.format(viewpoint[2]), 'description="Smoothing steps" ', 'type="RichPoint3f" ', '/>\n', ' <Param name="Selected" ', 'value="{}" '.format(str(selected).lower()), 'description="Affect only selected faces" ', 'type="RichBool" ', '/>\n', ' </filter>\n']) util.write_filter(script, filter_xml) return None
Measure the axis-aligned bounding box (aabb) of a mesh in multiple coordinate systems.
def measure_aabb(fbasename=None, log=None, coord_system='CARTESIAN'): """ Measure the axis aligned bounding box (aabb) of a mesh in multiple coordinate systems. Args: fbasename (str): filename of input model log (str): filename of log file coord_system (enum in ['CARTESIAN', 'CYLINDRICAL'] Coordinate system to use: 'CARTESIAN': lists contain [x, y, z] 'CYLINDRICAL': lists contain [r, theta, z] Returns: dict: dictionary with the following aabb properties min (3 element list): minimum values max (3 element list): maximum values center (3 element list): the center point size (3 element list): size of the aabb in each coordinate (max-min) diagonal (float): the diagonal of the aabb """ # TODO: add center point, spherical coordinate system fext = os.path.splitext(fbasename)[1][1:].strip().lower() if fext != 'xyz': fin = 'TEMP3D_aabb.xyz' run(log=log, file_in=fbasename, file_out=fin, script=None) else: fin = fbasename fread = open(fin, 'r') aabb = {'min': [999999.0, 999999.0, 999999.0], 'max': [-999999.0, -999999.0, -999999.0]} for line in fread: x_co, y_co, z_co = line.split() x_co = util.to_float(x_co) y_co = util.to_float(y_co) z_co = util.to_float(z_co) if coord_system == 'CARTESIAN': if x_co < aabb['min'][0]: aabb['min'][0] = x_co if y_co < aabb['min'][1]: aabb['min'][1] = y_co if z_co < aabb['min'][2]: aabb['min'][2] = z_co if x_co > aabb['max'][0]: aabb['max'][0] = x_co if y_co > aabb['max'][1]: aabb['max'][1] = y_co if z_co > aabb['max'][2]: aabb['max'][2] = z_co elif coord_system == 'CYLINDRICAL': radius = math.sqrt(x_co**2 + y_co**2) theta = math.degrees(math.atan2(y_co, x_co)) if radius < aabb['min'][0]: aabb['min'][0] = radius if theta < aabb['min'][1]: aabb['min'][1] = theta if z_co < aabb['min'][2]: aabb['min'][2] = z_co if radius > aabb['max'][0]: aabb['max'][0] = radius if theta > aabb['max'][1]: aabb['max'][1] = theta if z_co > aabb['max'][2]: aabb['max'][2] = z_co fread.close() try: aabb['center'] = [(aabb['max'][0] + aabb['min'][0]) / 2, (aabb['max'][1] + aabb['min'][1]) / 2, (aabb['max'][2] + aabb['min'][2]) / 2] aabb['size'] = [aabb['max'][0] - aabb['min'][0], aabb['max'][1] - aabb['min'][1], aabb['max'][2] - aabb['min'][2]] aabb['diagonal'] = math.sqrt( aabb['size'][0]**2 + aabb['size'][1]**2 + aabb['size'][2]**2) except UnboundLocalError: print('Error: aabb input file does not contain valid data. Exiting ...') sys.exit(1) for key, value in aabb.items(): if log is None: print('{:10} = {}'.format(key, value)) else: log_file = open(log, 'a') log_file.write('{:10} = {}\n'.format(key, value)) log_file.close() """ if log is not None: log_file = open(log, 'a') #log_file.write('***Axis Aligned Bounding Results for file "%s":\n' % fbasename) log_file.write('min = %s\n' % aabb['min']) log_file.write('max = %s\n' % aabb['max']) log_file.write('center = %s\n' % aabb['center']) log_file.write('size = %s\n' % aabb['size']) log_file.write('diagonal = %s\n\n' % aabb['diagonal']) log_file.close() # print(aabb) """ return aabb
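A brief usage sketch; bracket.xyz is a hypothetical point-cloud file, and the call is shown as a direct call in the same module (as measure_section does above), since the public import path is not shown in this source.

# Assumes this runs alongside the definition above; an .xyz input is read
# directly without invoking meshlabserver.
aabb = measure_aabb(fbasename='bracket.xyz', log=None,
                    coord_system='CYLINDRICAL')
# The returned dict holds min, max, center, size and diagonal;
# in cylindrical mode the lists are ordered [r, theta, z].
print('radial extent:', aabb['size'][0])
print('height:', aabb['size'][2])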
Measure a cross section of a mesh. Performs a plane cut in one of the major axes (X, Y, Z). If you want to cut on a different plane you will need to rotate the model in place, perform the cut, and rotate it back.

Args:
    fbasename (str): filename of input model
    log (str): filename of log file
    axis (str): axis perpendicular to the cutting plane, e.g. specify "z" to cut parallel to the XY plane.
    offset (float): amount to offset the cutting plane from the origin
    rotate_x_angle (float): degrees to rotate about the X axis. Useful for correcting "Up" direction: 90 to rotate Y to Z, and -90 to rotate Z to Y.
def measure_section(fbasename=None, log=None, axis='z', offset=0.0, rotate_x_angle=None, ml_version=ml_version): """Measure a cross section of a mesh Perform a plane cut in one of the major axes (X, Y, Z). If you want to cut on a different plane you will need to rotate the model in place, perform the cut, and rotate it back. Args: fbasename (str): filename of input model log (str): filename of log file axis (str): axis perpendicular to the cutting plane, e.g. specify "z" to cut parallel to the XY plane. offset (float): amount to offset the cutting plane from the origin rotate_x_angle (float): degrees to rotate about the X axis. Useful for correcting "Up" direction: 90 to rotate Y to Z, and -90 to rotate Z to Y. Returns: dict: dictionary with the following keys for the aabb of the section: min (list): list of the x, y & z minimum values max (list): list of the x, y & z maximum values center (list): the x, y & z coordinates of the center of the aabb size (list): list of the x, y & z sizes (max - min) diagonal (float): the diagonal of the aabb """ ml_script1_file = 'TEMP3D_measure_section.mlx' file_out = 'TEMP3D_sect_aabb.xyz' ml_script1 = mlx.FilterScript(file_in=fbasename, file_out=file_out, ml_version=ml_version) if rotate_x_angle is not None: transform.rotate(ml_script1, axis='x', angle=rotate_x_angle) compute.section(ml_script1, axis=axis, offset=offset) layers.delete_lower(ml_script1) ml_script1.save_to_file(ml_script1_file) ml_script1.run_script(log=log, script_file=ml_script1_file) aabb = measure_aabb(file_out, log) return aabb
Sort separate line segments in obj format into a continuous polyline or polylines. NOT FINISHED; DO NOT USE.
def polylinesort(fbasename=None, log=None): """Sort separate line segments in obj format into a continuous polyline or polylines. NOT FINISHED; DO NOT USE Also measures the length of each polyline Return polyline and polylineMeta (lengths) """ fext = os.path.splitext(fbasename)[1][1:].strip().lower() if fext != 'obj': print('Input file must be obj. Exiting ...') sys.exit(1) fread = open(fbasename, 'r') first = True polyline_vertices = [] line_segments = [] for line in fread: element, x_co, y_co, z_co = line.split() if element == 'v': polyline_vertices.append( [util.to_float(x_co), util.to_float(y_co), util.to_float(z_co)]) elif element == 'l': p1 = x_co p2 = y_co line_segments.append([int(p1), int(p2)]) fread.close() if log is not None: log_file = open(log, 'a') #log_file.write('***Axis Aligned Bounding Results for file "%s":\n' % fbasename) """log_file.write('min = %s\n' % aabb['min']) log_file.write('max = %s\n' % aabb['max']) log_file.write('center = %s\n' % aabb['center']) log_file.write('size = %s\n' % aabb['size']) log_file.write('diagonal = %s\n' % aabb['diagonal'])""" log_file.close() # print(aabb) return None
Measures mesh topology
def measure_topology(fbasename=None, log=None, ml_version=ml_version): """Measures mesh topology Args: fbasename (str): input filename. log (str): filename to log output Returns: dict: dictionary with the following keys: vert_num (int): number of vertices edge_num (int): number of edges face_num (int): number of faces unref_vert_num (int): number or unreferenced vertices boundry_edge_num (int): number of boundary edges part_num (int): number of parts (components) in the mesh. manifold (bool): True if mesh is two-manifold, otherwise false. non_manifold_edge (int): number of non_manifold edges. non_manifold_vert (int): number of non-manifold verices genus (int or str): genus of the mesh, either a number or 'undefined' if the mesh is non-manifold. holes (int or str): number of holes in the mesh, either a number or 'undefined' if the mesh is non-manifold. """ ml_script1_file = 'TEMP3D_measure_topology.mlx' ml_script1 = mlx.FilterScript(file_in=fbasename, ml_version=ml_version) compute.measure_topology(ml_script1) ml_script1.save_to_file(ml_script1_file) ml_script1.run_script(log=log, script_file=ml_script1_file) topology = ml_script1.topology return topology
Measures mesh geometry, aabb, and topology.
def measure_all(fbasename=None, log=None, ml_version=ml_version): """Measures mesh geometry, aabb and topology.""" ml_script1_file = 'TEMP3D_measure_gAndT.mlx' if ml_version == '1.3.4BETA': file_out = 'TEMP3D_aabb.xyz' else: file_out = None ml_script1 = mlx.FilterScript(file_in=fbasename, file_out=file_out, ml_version=ml_version) compute.measure_geometry(ml_script1) compute.measure_topology(ml_script1) ml_script1.save_to_file(ml_script1_file) ml_script1.run_script(log=log, script_file=ml_script1_file) geometry = ml_script1.geometry topology = ml_script1.topology if ml_version == '1.3.4BETA': if log is not None: log_file = open(log, 'a') log_file.write( '***Axis Aligned Bounding Results for file "%s":\n' % fbasename) log_file.close() aabb = measure_aabb(file_out, log) else: aabb = geometry['aabb'] return aabb, geometry, topology
Measure a dimension of a mesh
def measure_dimension(fbasename=None, log=None, axis1=None, offset1=0.0, axis2=None, offset2=0.0, ml_version=ml_version): """Measure a dimension of a mesh""" axis1 = axis1.lower() axis2 = axis2.lower() ml_script1_file = 'TEMP3D_measure_dimension.mlx' file_out = 'TEMP3D_measure_dimension.xyz' ml_script1 = mlx.FilterScript(file_in=fbasename, file_out=file_out, ml_version=ml_version) compute.section(ml_script1, axis1, offset1, surface=True) compute.section(ml_script1, axis2, offset2, surface=False) layers.delete_lower(ml_script1) ml_script1.save_to_file(ml_script1_file) ml_script1.run_script(log=log, script_file=ml_script1_file) for val in ('x', 'y', 'z'): if val not in (axis1, axis2): axis = val # ord: Get number that represents letter in ASCII # Here we find the offset from 'x' to determine the list reference # i.e. 0 for x, 1 for y, 2 for z axis_num = ord(axis) - ord('x') aabb = measure_aabb(file_out, log) dimension = {'min': aabb['min'][axis_num], 'max': aabb['max'][axis_num], 'length': aabb['size'][axis_num], 'axis': axis} if log is None: print('\nFor file "%s"' % fbasename) print('Dimension parallel to %s with %s=%s & %s=%s:' % (axis, axis1, offset1, axis2, offset2)) print(' Min = %s, Max = %s, Total length = %s' % (dimension['min'], dimension['max'], dimension['length'])) else: log_file = open(log, 'a') log_file.write('\nFor file "%s"\n' % fbasename) log_file.write('Dimension parallel to %s with %s=%s & %s=%s:\n' % (axis, axis1, offset1, axis2, offset2)) log_file.write('min = %s\n' % dimension['min']) log_file.write('max = %s\n' % dimension['max']) log_file.write('Total length = %s\n' % dimension['length']) log_file.close() return dimension
This is a helper used by UploadSet.save to provide lowercase extensions for all processed files, to compare with configured extensions in the same case.
def lowercase_ext(filename):
    """
    This is a helper used by UploadSet.save to provide lowercase extensions for
    all processed files, to compare with configured extensions in the same case.

    .. versionchanged:: 0.1.4
       Filenames without extensions are no longer lowercased, only the
       extension is returned in lowercase, if an extension exists.

    :param filename: The filename to ensure has a lowercase extension.
    """
    if '.' in filename:
        main, ext = os.path.splitext(filename)
        return main + ext.lower()
    # For consistency with os.path.splitext,
    # do not treat a filename without an extension as an extension.
    # That is, do not return filename.lower().
    return filename
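A tiny illustration of the behaviour described above; the file names are hypothetical.

lowercase_ext('photo.JPG')       # -> 'photo.jpg'
lowercase_ext('archive.tar.GZ')  # -> 'archive.tar.gz' (only the last extension is lowered)
lowercase_ext('README')          # -> 'README' (no extension, left untouched)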
By default, Flask will accept uploads of arbitrary size. While Werkzeug switches uploads from memory to a temporary file when they hit 500 KiB, it's still possible for someone to overload your disk space with a gigantic file.
def patch_request_class(app, size=64 * 1024 * 1024): """ By default, Flask will accept uploads to an arbitrary size. While Werkzeug switches uploads from memory to a temporary file when they hit 500 KiB, it's still possible for someone to overload your disk space with a gigantic file. This patches the app's request class's `~werkzeug.BaseRequest.max_content_length` attribute so that any upload larger than the given size is rejected with an HTTP error. .. note:: In Flask 0.6, you can do this by setting the `MAX_CONTENT_LENGTH` setting, without patching the request class. To emulate this behavior, you can pass `None` as the size (you must pass it explicitly). That is the best way to call this function, as it won't break the Flask 0.6 functionality if it exists. .. versionchanged:: 0.1.1 :param app: The app to patch the request class of. :param size: The maximum size to accept, in bytes. The default is 64 MiB. If it is `None`, the app's `MAX_CONTENT_LENGTH` configuration setting will be used to patch. """ if size is None: if isinstance(app.request_class.__dict__['max_content_length'], property): return size = app.config.get('MAX_CONTENT_LENGTH') reqclass = app.request_class patched = type(reqclass.__name__, (reqclass,), {'max_content_length': size}) app.request_class = patched
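A short usage sketch; the import path flask_uploads is an assumption (older releases exposed the package as flaskext.uploads), and the 16 MiB limit is arbitrary.

from flask import Flask
from flask_uploads import patch_request_class  # import path assumed

app = Flask(__name__)
# Reject any request body larger than 16 MiB with an HTTP error.
patch_request_class(app, 16 * 1024 * 1024)

# On Flask >= 0.6 the configuration-based equivalent is:
# app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
# patch_request_class(app, None)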
This is a helper function for configure_uploads that extracts the configuration for a single set.
def config_for_set(uset, app, defaults=None): """ This is a helper function for `configure_uploads` that extracts the configuration for a single set. :param uset: The upload set. :param app: The app to load the configuration from. :param defaults: A dict with keys `url` and `dest` from the `UPLOADS_DEFAULT_DEST` and `DEFAULT_UPLOADS_URL` settings. """ config = app.config prefix = 'UPLOADED_%s_' % uset.name.upper() using_defaults = False if defaults is None: defaults = dict(dest=None, url=None) allow_extns = tuple(config.get(prefix + 'ALLOW', ())) deny_extns = tuple(config.get(prefix + 'DENY', ())) destination = config.get(prefix + 'DEST') base_url = config.get(prefix + 'URL') if destination is None: # the upload set's destination wasn't given if uset.default_dest: # use the "default_dest" callable destination = uset.default_dest(app) if destination is None: # still # use the default dest from the config if defaults['dest'] is not None: using_defaults = True destination = os.path.join(defaults['dest'], uset.name) else: raise RuntimeError("no destination for set %s" % uset.name) if base_url is None and using_defaults and defaults['url']: base_url = addslash(defaults['url']) + uset.name + '/' return UploadConfiguration(destination, base_url, allow_extns, deny_extns)
Call this after the app has been configured. It will go through all the upload sets, get their configuration, and store the configuration on the app. It will also register the uploads module if it hasn't been set. This can be called multiple times with different upload sets.
def configure_uploads(app, upload_sets): """ Call this after the app has been configured. It will go through all the upload sets, get their configuration, and store the configuration on the app. It will also register the uploads module if it hasn't been set. This can be called multiple times with different upload sets. .. versionchanged:: 0.1.3 The uploads module/blueprint will only be registered if it is needed to serve the upload sets. :param app: The `~flask.Flask` instance to get the configuration from. :param upload_sets: The `UploadSet` instances to configure. """ if isinstance(upload_sets, UploadSet): upload_sets = (upload_sets,) if not hasattr(app, 'upload_set_config'): app.upload_set_config = {} set_config = app.upload_set_config defaults = dict(dest=app.config.get('UPLOADS_DEFAULT_DEST'), url=app.config.get('UPLOADS_DEFAULT_URL')) for uset in upload_sets: config = config_for_set(uset, app, defaults) set_config[uset.name] = config should_serve = any(s.base_url is None for s in set_config.values()) if '_uploads' not in app.blueprints and should_serve: app.register_blueprint(uploads_mod)
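A minimal setup sketch, assuming the package is importable as flask_uploads (an assumption; older releases used flaskext.uploads) and the destination directory is hypothetical.

from flask import Flask
from flask_uploads import UploadSet, configure_uploads  # import path assumed

app = Flask(__name__)
# Store files for the 'photos' set under a hypothetical directory; this is the
# UPLOADED_<NAME>_DEST key that config_for_set reads.
app.config['UPLOADED_PHOTOS_DEST'] = '/var/uploads/photos'

photos = UploadSet('photos', ('png', 'jpg', 'jpeg', 'gif'))
# Reads the UPLOADED_PHOTOS_* settings and stores them on the app; the serving
# blueprint is only registered if some set has no base URL.
configure_uploads(app, (photos,))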
This gets the current configuration. By default, it looks up the current application and gets the configuration from there. But if you don't want to go to the full effort of setting an application, or it's otherwise outside of a request context, set the _config attribute to an UploadConfiguration instance, then set it back to None when you're done.
def config(self):
    """
    This gets the current configuration. By default, it looks up the current
    application and gets the configuration from there. But if you don't want
    to go to the full effort of setting an application, or it's otherwise
    outside of a request context, set the `_config` attribute to an
    `UploadConfiguration` instance, then set it back to `None` when you're
    done.
    """
    if self._config is not None:
        return self._config
    try:
        return current_app.upload_set_config[self.name]
    except AttributeError:
        raise RuntimeError("cannot access configuration outside request")
This function gets the URL a file uploaded to this set would be accessed at. It doesn't check whether said file exists.
def url(self, filename): """ This function gets the URL a file uploaded to this set would be accessed at. It doesn't check whether said file exists. :param filename: The filename to return the URL for. """ base = self.config.base_url if base is None: return url_for('_uploads.uploaded_file', setname=self.name, filename=filename, _external=True) else: return base + filename
This returns the absolute path of a file uploaded to this set. It doesn't actually check whether said file exists.
def path(self, filename, folder=None): """ This returns the absolute path of a file uploaded to this set. It doesn't actually check whether said file exists. :param filename: The filename to return the path for. :param folder: The subfolder within the upload set previously used to save to. """ if folder is not None: target_folder = os.path.join(self.config.destination, folder) else: target_folder = self.config.destination return os.path.join(target_folder, filename)
This determines whether a specific extension is allowed. It is called by file_allowed, so if you override that but still want to check extensions, call back into this.
def extension_allowed(self, ext):
    """
    This determines whether a specific extension is allowed. It is called by
    `file_allowed`, so if you override that but still want to check
    extensions, call back into this.

    :param ext: The extension to check, without the dot.
    """
    return ((ext in self.config.allow) or
            (ext in self.extensions and ext not in self.config.deny))
This saves a werkzeug.FileStorage into this upload set. If the upload is not allowed, an UploadNotAllowed error will be raised. Otherwise, the file will be saved and its name (including the folder) will be returned.
def save(self, storage, folder=None, name=None): """ This saves a `werkzeug.FileStorage` into this upload set. If the upload is not allowed, an `UploadNotAllowed` error will be raised. Otherwise, the file will be saved and its name (including the folder) will be returned. :param storage: The uploaded file to save. :param folder: The subfolder within the upload set to save to. :param name: The name to save the file as. If it ends with a dot, the file's extension will be appended to the end. (If you are using `name`, you can include the folder in the `name` instead of explicitly using `folder`, i.e. ``uset.save(file, name="someguy/photo_123.")`` """ if not isinstance(storage, FileStorage): raise TypeError("storage must be a werkzeug.FileStorage") if folder is None and name is not None and "/" in name: folder, name = os.path.split(name) basename = self.get_basename(storage.filename) if name: if name.endswith('.'): basename = name + extension(basename) else: basename = name if not self.file_allowed(storage, basename): raise UploadNotAllowed() if folder: target_folder = os.path.join(self.config.destination, folder) else: target_folder = self.config.destination if not os.path.exists(target_folder): os.makedirs(target_folder) if os.path.exists(os.path.join(target_folder, basename)): basename = self.resolve_conflict(target_folder, basename) target = os.path.join(target_folder, basename) storage.save(target) if folder: return posixpath.join(folder, basename) else: return basename
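A short view sketch showing how save and url fit together, assuming the photos UploadSet from a setup like the one above; the route, form field, and subfolder names are hypothetical, and the import path is assumed.

from flask import request, redirect
from flask_uploads import UploadNotAllowed  # import path assumed

@app.route('/upload', methods=['POST'])
def upload_photo():
    try:
        # Saves into the set's destination under a per-purpose subfolder and
        # returns the stored name including that folder.
        saved = photos.save(request.files['photo'], folder='avatars')
    except UploadNotAllowed:
        return 'That file type is not allowed', 400
    # url() builds the public URL without checking that the file exists.
    return redirect(photos.url(saved))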
If a file with the selected name already exists in the target folder, this method is called to resolve the conflict. It should return a new basename for the file.
def resolve_conflict(self, target_folder, basename): """ If a file with the selected name already exists in the target folder, this method is called to resolve the conflict. It should return a new basename for the file. The default implementation splits the name and extension and adds a suffix to the name consisting of an underscore and a number, and tries that until it finds one that doesn't exist. :param target_folder: The absolute path to the target. :param basename: The file's original basename. """ name, ext = os.path.splitext(basename) count = 0 while True: count = count + 1 newname = '%s_%d%s' % (name, count, ext) if not os.path.exists(os.path.join(target_folder, newname)): return newname
Returns actual version specified in filename.
def get_vprof_version(filename):
    """Returns actual version specified in filename."""
    with open(filename) as src_file:
        version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                                  src_file.read(), re.M)
        if version_match:
            return version_match.group(1)
        raise RuntimeError('Unable to find version info.')
Removes duplicate objects.
def _remove_duplicates(objects):
    """Removes duplicate objects.

    http://www.peterbe.com/plog/uniqifiers-benchmark.
    """
    seen, uniq = set(), []
    for obj in objects:
        obj_id = id(obj)
        if obj_id in seen:
            continue
        seen.add(obj_id)
        uniq.append(obj)
    return uniq
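Because deduplication is keyed on id() rather than equality, equal but distinct objects are both kept; a tiny illustration (assumed to run alongside the definition above):

a = [1, 2]
b = [1, 2]               # equal to a, but a different object
objs = [a, a, b]
_remove_duplicates(objs)  # -> [a, b]: the repeated reference to a is dropped,
                          #    but b survives because id(b) != id(a)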
Returns count difference in two collections of Python objects.
def _get_obj_count_difference(objs1, objs2): """Returns count difference in two collections of Python objects.""" clean_obj_list1 = _process_in_memory_objects(objs1) clean_obj_list2 = _process_in_memory_objects(objs2) obj_count_1 = _get_object_count_by_type(clean_obj_list1) obj_count_2 = _get_object_count_by_type(clean_obj_list2) return obj_count_1 - obj_count_2
Formats object count.
def _format_obj_count(objects): """Formats object count.""" result = [] regex = re.compile(r'<(?P<type>\w+) \'(?P<name>\S+)\'>') for obj_type, obj_count in objects.items(): if obj_count != 0: match = re.findall(regex, repr(obj_type)) if match: obj_type, obj_name = match[0] result.append(("%s %s" % (obj_type, obj_name), obj_count)) return sorted(result, key=operator.itemgetter(1), reverse=True)
Checks memory usage when a 'line' event occurs.
def _trace_memory_usage(self, frame, event, arg): #pylint: disable=unused-argument """Checks memory usage when 'line' event occur.""" if event == 'line' and frame.f_code.co_filename in self.target_modules: self._events_list.append( (frame.f_lineno, self._process.memory_info().rss, frame.f_code.co_name, frame.f_code.co_filename)) return self._trace_memory_usage
Returns processed memory usage.
def code_events(self): """Returns processed memory usage.""" if self._resulting_events: return self._resulting_events for i, (lineno, mem, func, fname) in enumerate(self._events_list): mem_in_mb = float(mem - self.mem_overhead) / _BYTES_IN_MB if (self._resulting_events and self._resulting_events[-1][0] == lineno and self._resulting_events[-1][2] == func and self._resulting_events[-1][3] == fname and self._resulting_events[-1][1] < mem_in_mb): self._resulting_events[-1][1] = mem_in_mb else: self._resulting_events.append( [i + 1, lineno, mem_in_mb, func, fname]) return self._resulting_events
Returns all objects that are considered a profiler overhead. Objects are hardcoded for convenience.
def obj_overhead(self): """Returns all objects that are considered a profiler overhead. Objects are hardcoded for convenience. """ overhead = [ self, self._resulting_events, self._events_list, self._process ] overhead_count = _get_object_count_by_type(overhead) # One for reference to __dict__ and one for reference to # the current module. overhead_count[dict] += 2 return overhead_count
Returns memory overhead.
def compute_mem_overhead(self):
    """Returns memory overhead."""
    self.mem_overhead = (self._process.memory_info().rss -
                         builtins.initial_rss_size)
Returns memory stats for a package.
def profile_package(self): """Returns memory stats for a package.""" target_modules = base_profiler.get_pkg_module_names(self._run_object) try: with _CodeEventsTracker(target_modules) as prof: prof.compute_mem_overhead() runpy.run_path(self._run_object, run_name='__main__') except SystemExit: pass return prof, None
Returns memory stats for a module.
def profile_module(self): """Returns memory stats for a module.""" target_modules = {self._run_object} try: with open(self._run_object, 'rb') as srcfile,\ _CodeEventsTracker(target_modules) as prof: code = compile(srcfile.read(), self._run_object, 'exec') prof.compute_mem_overhead() exec(code, self._globs, None) except SystemExit: pass return prof, None
Returns memory stats for a function.
def profile_function(self): """Returns memory stats for a function.""" target_modules = {self._run_object.__code__.co_filename} with _CodeEventsTracker(target_modules) as prof: prof.compute_mem_overhead() result = self._run_object(*self._run_args, **self._run_kwargs) return prof, result
Collects memory stats for specified Python program.
def run(self): """Collects memory stats for specified Python program.""" existing_objects = _get_in_memory_objects() prof, result = self.profile() new_objects = _get_in_memory_objects() new_obj_count = _get_obj_count_difference(new_objects, existing_objects) result_obj_count = new_obj_count - prof.obj_overhead # existing_objects list is also profiler overhead result_obj_count[list] -= 1 pretty_obj_count = _format_obj_count(result_obj_count) return { 'objectName': self._object_name, 'codeEvents': prof.code_events, 'totalEvents': len(prof.code_events), 'objectsCount': pretty_obj_count, 'result': result, 'timestamp': int(time.time()) }
Returns module filenames from package.
def get_pkg_module_names(package_path): """Returns module filenames from package. Args: package_path: Path to Python package. Returns: A set of module filenames. """ module_names = set() for fobj, modname, _ in pkgutil.iter_modules(path=[package_path]): filename = os.path.join(fobj.path, '%s.py' % modname) if os.path.exists(filename): module_names.add(os.path.abspath(filename)) return module_names
Runs function in separate process.
def run_in_separate_process(func, *args, **kwargs): """Runs function in separate process. This function is used instead of a decorator, since Python multiprocessing module can't serialize decorated function on all platforms. """ manager = multiprocessing.Manager() manager_dict = manager.dict() process = ProcessWithException( manager_dict, target=func, args=args, kwargs=kwargs) process.start() process.join() exc = process.exception if exc: raise exc return process.output
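A minimal usage sketch, assumed to run in the same module as the helper above; the worker function is hypothetical. The helper returns the child's result in the parent, or re-raises the child's exception.

def fib(n):
    """Toy module-level worker used only for illustration."""
    return n if n < 2 else fib(n - 1) + fib(n - 2)

# Runs fib(20) in a child process and hands its return value back.
result = run_in_separate_process(fib, 20)
assert result == 6765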
Determines run object type.
def get_run_object_type(run_object):
    """Determines run object type."""
    if isinstance(run_object, tuple):
        return 'function'
    run_object, _, _ = run_object.partition(' ')
    if os.path.isdir(run_object):
        return 'package'
    return 'module'
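A few illustrative calls, with hypothetical paths; a tuple is treated as an already-bound function, a directory as a package, and anything else as a module.

get_run_object_type((max, (1, 2), {}))        # -> 'function'
get_run_object_type('mypkg')                  # -> 'package' (if mypkg/ is a directory)
get_run_object_type('script.py --arg value')  # -> 'module' (text after the space is ignored)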
Initializes profiler with a module.
def init_module(self, run_object): """Initializes profiler with a module.""" self.profile = self.profile_module self._run_object, _, self._run_args = run_object.partition(' ') self._object_name = '%s (module)' % self._run_object self._globs = { '__file__': self._run_object, '__name__': '__main__', '__package__': None, } program_path = os.path.dirname(self._run_object) if sys.path[0] != program_path: sys.path.insert(0, program_path) self._replace_sysargs()
Initializes profiler with a package.
def init_package(self, run_object): """Initializes profiler with a package.""" self.profile = self.profile_package self._run_object, _, self._run_args = run_object.partition(' ') self._object_name = '%s (package)' % self._run_object self._replace_sysargs()
Initializes profiler with a function.
def init_function(self, run_object): """Initializes profiler with a function.""" self.profile = self.profile_function self._run_object, self._run_args, self._run_kwargs = run_object filename = inspect.getsourcefile(self._run_object) self._object_name = '%s @ %s (function)' % ( self._run_object.__name__, filename)
Replaces sys.argv with proper args to pass to script.
def _replace_sysargs(self):
    """Replaces sys.argv with proper args to pass to script."""
    sys.argv[:] = [self._run_object]
    if self._run_args:
        sys.argv += self._run_args.split()
Samples the current stack and adds the result to self._stats.
def sample(self, signum, frame): #pylint: disable=unused-argument """Samples current stack and adds result in self._stats. Args: signum: Signal that activates handler. frame: Frame on top of the stack when signal is handled. """ stack = [] while frame and frame != self.base_frame: stack.append(( frame.f_code.co_name, frame.f_code.co_filename, frame.f_code.co_firstlineno)) frame = frame.f_back self._stats[tuple(stack)] += 1 signal.setitimer(signal.ITIMER_PROF, _SAMPLE_INTERVAL)
Inserts stack into the call tree.
def _insert_stack(stack, sample_count, call_tree): """Inserts stack into the call tree. Args: stack: Call stack. sample_count: Sample count of call stack. call_tree: Call tree. """ curr_level = call_tree for func in stack: next_level_index = { node['stack']: node for node in curr_level['children']} if func not in next_level_index: new_node = {'stack': func, 'children': [], 'sampleCount': 0} curr_level['children'].append(new_node) curr_level = new_node else: curr_level = next_level_index[func] curr_level['sampleCount'] = sample_count
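A small worked example of the tree that _insert_stack builds, assumed to run alongside the definition above; the (name, filename, first line) tuples are hypothetical sampler output.

call_tree = {'stack': 'base', 'sampleCount': 0, 'children': []}
main = ('main', 'prog.py', 1)
work = ('work', 'prog.py', 10)

# Two samples were observed in main -> work, one in main alone.
_insert_stack((main, work), 2, call_tree)
_insert_stack((main,), 1, call_tree)

# call_tree is now:
# {'stack': 'base', 'sampleCount': 0, 'children': [
#     {'stack': main, 'sampleCount': 1, 'children': [
#         {'stack': work, 'sampleCount': 2, 'children': []}]}]}
# The root's own sampleCount is filled in later by _fill_sample_count.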
Counts and fills sample counts inside call tree.
def _fill_sample_count(self, node): """Counts and fills sample counts inside call tree.""" node['sampleCount'] += sum( self._fill_sample_count(child) for child in node['children']) return node['sampleCount']
Reformats call tree for the UI.
def _format_tree(self, node, total_samples): """Reformats call tree for the UI.""" funcname, filename, _ = node['stack'] sample_percent = self._get_percentage( node['sampleCount'], total_samples) color_hash = base_profiler.hash_name('%s @ %s' % (funcname, filename)) return { 'stack': node['stack'], 'children': [self._format_tree(child, total_samples) for child in node['children']], 'sampleCount': node['sampleCount'], 'samplePercentage': sample_percent, 'colorHash': color_hash }
Returns call tree.
def call_tree(self): """Returns call tree.""" call_tree = {'stack': 'base', 'sampleCount': 0, 'children': []} for stack, sample_count in self._stats.items(): self._insert_stack(reversed(stack), sample_count, call_tree) self._fill_sample_count(call_tree) if not call_tree['children']: return {} return self._format_tree( call_tree['children'][0], call_tree['sampleCount'])
Runs statistical profiler on a package.
def _profile_package(self): """Runs statistical profiler on a package.""" with _StatProfiler() as prof: prof.base_frame = inspect.currentframe() try: runpy.run_path(self._run_object, run_name='__main__') except SystemExit: pass call_tree = prof.call_tree return { 'objectName': self._object_name, 'sampleInterval': _SAMPLE_INTERVAL, 'runTime': prof.run_time, 'callStats': call_tree, 'totalSamples': call_tree.get('sampleCount', 0), 'timestamp': int(time.time()) }
Runs statistical profiler on a module.
def _profile_module(self): """Runs statistical profiler on a module.""" with open(self._run_object, 'rb') as srcfile, _StatProfiler() as prof: code = compile(srcfile.read(), self._run_object, 'exec') prof.base_frame = inspect.currentframe() try: exec(code, self._globs, None) except SystemExit: pass call_tree = prof.call_tree return { 'objectName': self._object_name, 'sampleInterval': _SAMPLE_INTERVAL, 'runTime': prof.run_time, 'callStats': call_tree, 'totalSamples': call_tree.get('sampleCount', 0), 'timestamp': int(time.time()) }
Runs statistical profiler on a function.
def profile_function(self): """Runs statistical profiler on a function.""" with _StatProfiler() as prof: result = self._run_object(*self._run_args, **self._run_kwargs) call_tree = prof.call_tree return { 'objectName': self._object_name, 'sampleInterval': _SAMPLE_INTERVAL, 'runTime': prof.run_time, 'callStats': call_tree, 'totalSamples': call_tree.get('sampleCount', 0), 'result': result, 'timestamp': int(time.time()) }
Processes collected stats for UI.
def _transform_stats(prof): """Processes collected stats for UI.""" records = [] for info, params in prof.stats.items(): filename, lineno, funcname = info cum_calls, num_calls, time_per_call, cum_time, _ = params if prof.total_tt == 0: percentage = 0 else: percentage = round(100 * (cum_time / prof.total_tt), 4) cum_time = round(cum_time, 4) func_name = '%s @ %s' % (funcname, filename) color_hash = base_profiler.hash_name(func_name) records.append( (filename, lineno, funcname, cum_time, percentage, num_calls, cum_calls, time_per_call, filename, color_hash)) return sorted(records, key=operator.itemgetter(4), reverse=True)
Runs cProfile on a package.
def _profile_package(self): """Runs cProfile on a package.""" prof = cProfile.Profile() prof.enable() try: runpy.run_path(self._run_object, run_name='__main__') except SystemExit: pass prof.disable() prof_stats = pstats.Stats(prof) prof_stats.calc_callees() return { 'objectName': self._object_name, 'callStats': self._transform_stats(prof_stats), 'totalTime': prof_stats.total_tt, 'primitiveCalls': prof_stats.prim_calls, 'totalCalls': prof_stats.total_calls, 'timestamp': int(time.time()) }
Runs cProfile on a module.
def _profile_module(self): """Runs cProfile on a module.""" prof = cProfile.Profile() try: with open(self._run_object, 'rb') as srcfile: code = compile(srcfile.read(), self._run_object, 'exec') prof.runctx(code, self._globs, None) except SystemExit: pass prof_stats = pstats.Stats(prof) prof_stats.calc_callees() return { 'objectName': self._object_name, 'callStats': self._transform_stats(prof_stats), 'totalTime': prof_stats.total_tt, 'primitiveCalls': prof_stats.prim_calls, 'totalCalls': prof_stats.total_calls, 'timestamp': int(time.time()) }
Runs cProfile on a function.
def profile_function(self): """Runs cProfile on a function.""" prof = cProfile.Profile() prof.enable() result = self._run_object(*self._run_args, **self._run_kwargs) prof.disable() prof_stats = pstats.Stats(prof) prof_stats.calc_callees() return { 'objectName': self._object_name, 'callStats': self._transform_stats(prof_stats), 'totalTime': prof_stats.total_tt, 'primitiveCalls': prof_stats.prim_calls, 'totalCalls': prof_stats.total_calls, 'result': result, 'timestamp': int(time.time()) }
Initializes DB.
def init_db():
    """Initializes DB."""
    with contextlib.closing(connect_to_db()) as db:
        db.cursor().executescript(DB_SCHEMA)
        db.commit()
Returns all existing guestbook records.
def show_guestbook(): """Returns all existing guestbook records.""" cursor = flask.g.db.execute( 'SELECT name, message FROM entry ORDER BY id DESC;') entries = [{'name': row[0], 'message': row[1]} for row in cursor.fetchall()] return jinja2.Template(LAYOUT).render(entries=entries)
Adds single guestbook record.
def add_entry():
    """Adds single guestbook record."""
    name, msg = flask.request.form['name'], flask.request.form['message']
    flask.g.db.execute(
        'INSERT INTO entry (name, message) VALUES (?, ?)', (name, msg))
    flask.g.db.commit()
    return flask.redirect('/')
Profiler handler.
def profiler_handler(uri): """Profiler handler.""" # HTTP method should be GET. if uri == 'main': runner.run(show_guestbook, 'cmhp') # In this case HTTP method should be POST singe add_entry uses POST elif uri == 'add': runner.run(add_entry, 'cmhp') return flask.redirect('/')
Starts HTTP server with specified parameters.
def start(host, port, profiler_stats, dont_start_browser, debug_mode): """Starts HTTP server with specified parameters. Args: host: Server host name. port: Server port. profiler_stats: A dict with collected program stats. dont_start_browser: Whether to open browser after profiling. debug_mode: Whether to redirect stderr to /dev/null. """ stats_handler = functools.partial(StatsHandler, profiler_stats) if not debug_mode: sys.stderr = open(os.devnull, 'w') print('Starting HTTP server...') if not dont_start_browser: webbrowser.open('http://{}:{}/'.format(host, port)) try: StatsServer((host, port), stats_handler).serve_forever() except KeyboardInterrupt: print('Stopping...') sys.exit(0)
Handles index.html requests.
def _handle_root(): """Handles index.html requests.""" res_filename = os.path.join( os.path.dirname(__file__), _PROFILE_HTML) with io.open(res_filename, 'rb') as res_file: content = res_file.read() return content, 'text/html'
Handles static files requests.
def _handle_other(self): """Handles static files requests.""" res_filename = os.path.join( os.path.dirname(__file__), _STATIC_DIR, self.path[1:]) with io.open(res_filename, 'rb') as res_file: content = res_file.read() _, extension = os.path.splitext(self.path) return content, 'text/%s' % extension[1:]
Handles HTTP GET requests.
def do_GET(self): """Handles HTTP GET requests.""" handler = self.uri_map.get(self.path) or self._handle_other content, content_type = handler() compressed_content = gzip.compress(content) self._send_response( 200, headers=(('Content-type', '%s; charset=utf-8' % content_type), ('Content-Encoding', 'gzip'), ('Content-Length', len(compressed_content)))) self.wfile.write(compressed_content)
Handles HTTP POST requests.
def do_POST(self): """Handles HTTP POST requests.""" post_data = self.rfile.read(int(self.headers['Content-Length'])) json_data = gzip.decompress(post_data) self._profile_json.update(json.loads(json_data.decode('utf-8'))) self._send_response( 200, headers=(('Content-type', '%s; charset=utf-8' % 'text/json'), ('Content-Encoding', 'gzip'), ('Content-Length', len(post_data))))
Sends HTTP response code message and headers.
def _send_response(self, http_code, message=None, headers=None): """Sends HTTP response code, message and headers.""" self.send_response(http_code, message) if headers: for header in headers: self.send_header(*header) self.end_headers()
Main function of the module.
def main(): """Main function of the module.""" parser = argparse.ArgumentParser( prog=_PROGRAN_NAME, description=_MODULE_DESC, formatter_class=argparse.RawTextHelpFormatter) launch_modes = parser.add_mutually_exclusive_group(required=True) launch_modes.add_argument('-r', '--remote', dest='remote', action='store_true', default=False, help='launch in remote mode') launch_modes.add_argument('-i', '--input-file', dest='input_file', type=str, default='', help='render UI from file') launch_modes.add_argument('-c', '--config', nargs=2, dest='config', help=_CONFIG_DESC, metavar=('CONFIG', 'SRC')) parser.add_argument('-H', '--host', dest='host', default=_HOST, type=str, help='set internal webserver host') parser.add_argument('-p', '--port', dest='port', default=_PORT, type=int, help='set internal webserver port') parser.add_argument('-n', '--no-browser', dest='dont_start_browser', action='store_true', default=False, help="don't start browser automatically") parser.add_argument('-o', '--output-file', dest='output_file', type=str, default='', help='save profile to file') parser.add_argument('--debug', dest='debug_mode', action='store_true', default=False, help="don't suppress error messages") parser.add_argument('--version', action='version', version='vprof %s' % __version__) args = parser.parse_args() # Render UI from file. if args.input_file: with open(args.input_file) as ifile: saved_stats = json.loads(ifile.read()) if saved_stats['version'] != __version__: print('Incorrect profiler version - %s. %s is required.' % ( saved_stats['version'], __version__)) sys.exit(_ERR_CODES['input_file_error']) stats_server.start(args.host, args.port, saved_stats, args.dont_start_browser, args.debug_mode) # Launch in remote mode. elif args.remote: stats_server.start(args.host, args.port, {}, args.dont_start_browser, args.debug_mode) # Profiler mode. else: config, source = args.config try: program_stats = runner.run_profilers(source, config, verbose=True) except runner.AmbiguousConfigurationError: print('Profiler configuration %s is ambiguous. ' 'Please, remove duplicates.' % config) sys.exit(_ERR_CODES['ambiguous_configuration']) except runner.BadOptionError as exc: print(exc) sys.exit(_ERR_CODES['bad_option']) if args.output_file: with open(args.output_file, 'w') as outfile: program_stats['version'] = __version__ outfile.write(json.dumps(program_stats, indent=2)) else: stats_server.start( args.host, args.port, program_stats, args.dont_start_browser, args.debug_mode)
Checks whether path belongs to standard library or installed modules.
def check_standard_dir(module_path):
    """Checks whether path belongs to standard library or installed modules."""
    if 'site-packages' in module_path:
        return True
    for stdlib_path in _STDLIB_PATHS:
        if fnmatch.fnmatchcase(module_path, stdlib_path + '*'):
            return True
    return False
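A quick illustration of the prefix match used above; the stdlib path and file names here are made up for the example.

import fnmatch

stdlib_path = '/usr/lib/python3.6'
# Any path under the stdlib prefix matches the pattern '<prefix>*'.
fnmatch.fnmatchcase('/usr/lib/python3.6/json/decoder.py', stdlib_path + '*')  # True
fnmatch.fnmatchcase('/home/user/project/app.py', stdlib_path + '*')           # False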
Records line execution time.
def record_line(self, frame, event, arg):  # pylint: disable=unused-argument
    """Records line execution time."""
    if event == 'line':
        if self.prev_timestamp:
            runtime = time.time() - self.prev_timestamp
            self.lines.append([self.prev_path, self.prev_lineno, runtime])
        self.prev_lineno = frame.f_lineno
        self.prev_path = frame.f_code.co_filename
        self.prev_timestamp = time.time()
    return self.record_line
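For context, a minimal self-contained sketch of how a record_line-style callback is typically installed with sys.settrace. This is a toy stand-in, not the module's _CodeHeatmapCalculator; the class name and context-manager shape are assumptions for illustration.

import sys
import time

class _ToyLineTimer:
    """Toy tracer: installs a record_line-style callback via sys.settrace."""

    def __init__(self):
        self.lines = []
        self.prev_timestamp = None
        self.prev_path, self.prev_lineno = None, None

    def record_line(self, frame, event, arg):  # same shape as above
        if event == 'line':
            if self.prev_timestamp:
                self.lines.append(
                    [self.prev_path, self.prev_lineno,
                     time.time() - self.prev_timestamp])
            self.prev_path = frame.f_code.co_filename
            self.prev_lineno = frame.f_lineno
            self.prev_timestamp = time.time()
        # Returning the callback keeps it installed as the local tracer.
        return self.record_line

    def __enter__(self):
        sys.settrace(self.record_line)
        return self

    def __exit__(self, *exc):
        sys.settrace(None)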
Filters code from the standard library out of self.lines.
def lines_without_stdlib(self):
    """Filters code from standard library from self.lines."""
    prev_line = None
    current_module_path = inspect.getabsfile(inspect.currentframe())
    for module_path, lineno, runtime in self.lines:
        module_abspath = os.path.abspath(module_path)
        if not prev_line:
            prev_line = [module_abspath, lineno, runtime]
        else:
            if (not check_standard_dir(module_path) and
                    module_abspath != current_module_path):
                yield prev_line
                prev_line = [module_abspath, lineno, runtime]
            else:
                prev_line[2] += runtime
    yield prev_line
Fills code heatmap and execution count dictionaries.
def fill_heatmap(self):
    """Fills code heatmap and execution count dictionaries."""
    for module_path, lineno, runtime in self.lines_without_stdlib:
        self._execution_count[module_path][lineno] += 1
        self._heatmap[module_path][lineno] += runtime
Calculates skip map for large sources. Skip map is a list of tuples where the first element of the tuple is a line number and the second is the length of the skip region: [(1, 10), (15, 10)] means skipping 10 lines after line 1 and 10 lines after line 15.
def _calc_skips(self, heatmap, num_lines):
    """Calculates skip map for large sources.

    Skip map is a list of tuples where first element of tuple is line
    number and second is length of the skip region:
        [(1, 10), (15, 10)] means skipping 10 lines after line 1 and
        10 lines after line 15.
    """
    if num_lines < self.MIN_SKIP_SIZE:
        return []
    skips, prev_line = [], 0
    for line in sorted(heatmap):
        curr_skip = line - prev_line - 1
        if curr_skip > self.SKIP_LINES:
            skips.append((prev_line, curr_skip))
        prev_line = line
    if num_lines - prev_line > self.SKIP_LINES:
        skips.append((prev_line, num_lines - prev_line))
    return skips
Skips lines in src_code specified by skip map.
def _skip_lines(src_code, skip_map):
    """Skips lines in src_code specified by skip map."""
    if not skip_map:
        return [['line', j + 1, l] for j, l in enumerate(src_code)]
    code_with_skips, i = [], 0
    for line, length in skip_map:
        code_with_skips.extend(
            ['line', i + j + 1, l] for j, l in enumerate(src_code[i:line]))
        if code_with_skips and code_with_skips[-1][0] == 'skip':  # Merge skips.
            code_with_skips[-1][1] += length
        else:
            code_with_skips.append(['skip', length])
        i = line + length
    code_with_skips.extend(
        ['line', i + j + 1, l] for j, l in enumerate(src_code[i:]))
    return code_with_skips
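A small illustration of how a skip map collapses uninteresting regions; the 30-line source and the skip map values are made up for the example.

src = ['line %d' % n for n in range(1, 31)]   # 30 dummy source lines
skipped = _skip_lines(src, [(5, 10)])
# Result: ['line', 1, ...] .. ['line', 5, ...], then ['skip', 10],
# then ['line', 16, ...] .. ['line', 30, ...].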
Calculates heatmap for package.
def _profile_package(self):
    """Calculates heatmap for package."""
    with _CodeHeatmapCalculator() as prof:
        try:
            runpy.run_path(self._run_object, run_name='__main__')
        except SystemExit:
            pass
    heatmaps = []
    for filename, heatmap in prof.heatmap.items():
        if os.path.isfile(filename):
            heatmaps.append(
                self._format_heatmap(
                    filename, heatmap, prof.execution_count[filename]))
    run_time = sum(heatmap['runTime'] for heatmap in heatmaps)
    return {
        'objectName': self._run_object,
        'runTime': run_time,
        'heatmaps': heatmaps
    }
Formats heatmap for UI.
def _format_heatmap(self, filename, heatmap, execution_count):
    """Formats heatmap for UI."""
    with open(filename) as src_file:
        file_source = src_file.read().split('\n')
    skip_map = self._calc_skips(heatmap, len(file_source))
    run_time = sum(time for time in heatmap.values())
    return {
        'name': filename,
        'heatmap': heatmap,
        'executionCount': execution_count,
        'srcCode': self._skip_lines(file_source, skip_map),
        'runTime': run_time
    }
Calculates heatmap for module.
def _profile_module(self):
    """Calculates heatmap for module."""
    with open(self._run_object, 'r') as srcfile:
        src_code = srcfile.read()
    code = compile(src_code, self._run_object, 'exec')
    try:
        with _CodeHeatmapCalculator() as prof:
            exec(code, self._globs, None)
    except SystemExit:
        pass
    heatmaps = []
    for filename, heatmap in prof.heatmap.items():
        if os.path.isfile(filename):
            heatmaps.append(
                self._format_heatmap(
                    filename, heatmap, prof.execution_count[filename]))
    run_time = sum(heatmap['runTime'] for heatmap in heatmaps)
    return {
        'objectName': self._run_object,
        'runTime': run_time,
        'heatmaps': heatmaps
    }
Calculates heatmap for function.
def profile_function(self):
    """Calculates heatmap for function."""
    with _CodeHeatmapCalculator() as prof:
        result = self._run_object(*self._run_args, **self._run_kwargs)
    code_lines, start_line = inspect.getsourcelines(self._run_object)
    source_lines = []
    for line in code_lines:
        source_lines.append(('line', start_line, line))
        start_line += 1
    filename = os.path.abspath(inspect.getsourcefile(self._run_object))
    heatmap = prof.heatmap[filename]
    run_time = sum(time for time in heatmap.values())
    return {
        'objectName': self._object_name,
        'runTime': run_time,
        'result': result,
        'timestamp': int(time.time()),
        'heatmaps': [{
            'name': self._object_name,
            'heatmap': heatmap,
            'executionCount': prof.execution_count[filename],
            'srcCode': source_lines,
            'runTime': run_time
        }]
    }
Runs profilers on run_object.
def run_profilers(run_object, prof_config, verbose=False):
    """Runs profilers on run_object.

    Args:
        run_object: An object (string or tuple) for profiling.
        prof_config: A string with profilers configuration.
        verbose: True if info about running profilers should be shown.
    Returns:
        An ordered dictionary with collected stats.
    Raises:
        AmbiguousConfigurationError: when prof_config is ambiguous.
        BadOptionError: when unknown options are present in configuration.
    """
    if len(prof_config) > len(set(prof_config)):
        raise AmbiguousConfigurationError(
            'Profiler configuration %s is ambiguous' % prof_config)
    available_profilers = {opt for opt, _ in _PROFILERS}
    for option in prof_config:
        if option not in available_profilers:
            raise BadOptionError('Unknown option: %s' % option)
    run_stats = OrderedDict()
    present_profilers = ((o, p) for o, p in _PROFILERS if o in prof_config)
    for option, prof in present_profilers:
        curr_profiler = prof(run_object)
        if verbose:
            print('Running %s...' % curr_profiler.__class__.__name__)
        run_stats[option] = curr_profiler.run()
    return run_stats
Runs profilers on a function.
def run(func, options, args=(), kwargs={}, host='localhost', port=8000):  # pylint: disable=dangerous-default-value
    """Runs profilers on a function.

    Args:
        func: A Python function.
        options: A string with profilers configuration (i.e. 'cmh').
        args: func non-keyword arguments.
        kwargs: func keyword arguments.
        host: Host name to send collected data.
        port: Port number to send collected data.
    Returns:
        A result of func execution.
    """
    run_stats = run_profilers((func, args, kwargs), options)
    result = None
    for prof in run_stats:
        if not result:
            result = run_stats[prof]['result']
        del run_stats[prof]['result']  # Don't send result to remote host
    post_data = gzip.compress(
        json.dumps(run_stats).encode('utf-8'))
    urllib.request.urlopen('http://%s:%s' % (host, port), post_data)
    return result
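A usage sketch based on the signature above: with the stats server already listening in remote mode (the '-r' flag handled in main()), profile a function and send the collected stats to it. The fib function and the exact option string are illustrative only.

from vprof import runner

def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

# Profiles fib(20) with the 'c', 'm' and 'h' profilers and ships the
# gzipped JSON stats to the UI running on localhost:8000.
result = runner.run(fib, 'cmh', args=(20,), host='localhost', port=8000)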
Return probability estimates for the RDD containing test vector X.
def predict_proba(self, X):
    """Return probability estimates for the RDD containing test vector X.

    Parameters
    ----------
    X : RDD containing array-like items, shape = [m_samples, n_features]

    Returns
    -------
    C : RDD with array-like items, shape = [n_samples, n_classes]
        Returns the probability of the samples for each class in
        the models for each RDD block. The columns correspond to the
        classes in sorted order, as they appear in the attribute `classes_`.
    """
    check_rdd(X, (sp.spmatrix, np.ndarray))
    return X.map(
        lambda X: super(SparkBaseNB, self).predict_proba(X))
Return log-probability estimates for the RDD containing the test vector X.
def predict_log_proba(self, X):
    """Return log-probability estimates for the RDD containing the test
    vector X.

    Parameters
    ----------
    X : RDD containing array-like items, shape = [m_samples, n_features]

    Returns
    -------
    C : RDD with array-like items, shape = [n_samples, n_classes]
        Returns the log-probability of the samples for each class in
        the model for each RDD block. The columns correspond to the
        classes in sorted order, as they appear in the attribute `classes_`.
    """
    # Required: scikit-learn calls self.predict_log_proba(X) inside
    # predict_proba, so this method is reached there too; it must keep the
    # same behavior when it is not called through sparkit-learn.
    if not isinstance(X, BlockRDD):
        return super(SparkBaseNB, self).predict_log_proba(X)

    check_rdd(X, (sp.spmatrix, np.ndarray))
    return X.map(
        lambda X: super(SparkBaseNB, self).predict_log_proba(X))
Fit Gaussian Naive Bayes according to X, y.
def fit(self, Z, classes=None):
    """Fit Gaussian Naive Bayes according to X, y

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples
        and n_features is the number of features.
    y : array-like, shape (n_samples,)
        Target values.

    Returns
    -------
    self : object
        Returns self.
    """
    check_rdd(Z, {'X': (sp.spmatrix, np.ndarray),
                  'y': (sp.spmatrix, np.ndarray)})
    models = Z[:, ['X', 'y']].map(
        lambda X_y: self.partial_fit(X_y[0], X_y[1], classes))
    avg = models.reduce(operator.add)
    self.__dict__.update(avg.__dict__)
    return self
TODO (fulibacsi): fix docstring. Fit Multinomial Naive Bayes according to an (X, y) pair which is zipped into TupleRDD Z.
def fit(self, Z, classes=None):
    """TODO (fulibacsi): fix docstring.
    Fit Multinomial Naive Bayes according to (X, y) pair which is
    zipped into TupleRDD Z.

    Parameters
    ----------
    Z : TupleRDD containing X [array-like, shape (m_samples, n_features)]
        and y [array-like, shape (m_samples,)] tuple
        Training vectors, where m_samples is the number of samples in the
        block and n_features is the number of features, and y contains
        the target values.

    Returns
    -------
    self : object
        Returns self.
    """
    check_rdd(Z, {'X': (sp.spmatrix, np.ndarray),
                  'y': (sp.spmatrix, np.ndarray)})
    if 'w' in Z.columns:
        models = Z[:, ['X', 'y', 'w']].map(
            lambda X_y_w: self.partial_fit(X_y_w[0], X_y_w[1], classes,
                                           X_y_w[2]))
    else:
        models = Z[:, ['X', 'y']].map(
            lambda X_y: self.partial_fit(X_y[0], X_y[1], classes))
    avg = models.sum()
    self.__dict__.update(avg.__dict__)
    return self
Create vocabulary
def _init_vocab(self, analyzed_docs):
    """Create vocabulary"""
    class SetAccum(AccumulatorParam):

        def zero(self, initialValue):
            return set(initialValue)

        def addInPlace(self, v1, v2):
            v1 |= v2
            return v1

    if not self.fixed_vocabulary_:
        accum = analyzed_docs._rdd.context.accumulator(set(), SetAccum())
        analyzed_docs.foreach(
            lambda x: accum.add(set(chain.from_iterable(x))))
        vocabulary = {t: i for i, t in enumerate(accum.value)}
    else:
        vocabulary = self.vocabulary_

    if not vocabulary:
        raise ValueError("empty vocabulary; perhaps the documents only"
                         " contain stop words")
    return vocabulary
Create sparse feature matrix, and vocabulary where fixed_vocab=False.
def _count_vocab(self, analyzed_docs):
    """Create sparse feature matrix, and vocabulary where fixed_vocab=False
    """
    vocabulary = self.vocabulary_
    j_indices = _make_int_array()
    indptr = _make_int_array()
    indptr.append(0)
    for doc in analyzed_docs:
        for feature in doc:
            try:
                j_indices.append(vocabulary[feature])
            except KeyError:
                # Ignore out-of-vocabulary items for fixed_vocab=True
                continue
        indptr.append(len(j_indices))

    j_indices = frombuffer_empty(j_indices, dtype=np.intc)
    indptr = np.frombuffer(indptr, dtype=np.intc)
    values = np.ones(len(j_indices))

    X = sp.csr_matrix((values, j_indices, indptr),
                      shape=(len(indptr) - 1, len(vocabulary)),
                      dtype=self.dtype)
    X.sum_duplicates()
    if self.binary:
        X.data.fill(1)
    return X
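To make the CSR construction above concrete, a tiny self-contained example with made-up indices: two "documents" over a three-term vocabulary, built from the running column indices and the row-pointer array.

import numpy as np
import scipy.sparse as sp

j_indices = np.array([0, 2, 2, 1], dtype=np.intc)  # term columns, doc by doc
indptr = np.array([0, 2, 4], dtype=np.intc)        # document boundaries
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr), shape=(2, 3))
X.sum_duplicates()
# X.toarray() -> [[1., 0., 1.],
#                 [0., 1., 1.]]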
Sort features by name
def _sort_features(self, vocabulary):
    """Sort features by name

    Returns a reordered matrix and modifies the vocabulary in place
    """
    sorted_features = sorted(six.iteritems(vocabulary))
    map_index = np.empty(len(sorted_features), dtype=np.int32)
    for new_val, (term, old_val) in enumerate(sorted_features):
        map_index[new_val] = old_val
        vocabulary[term] = new_val
    return map_index
Remove too rare or too common features.
def _limit_features(self, X, vocabulary, high=None, low=None, limit=None):
    """Remove too rare or too common features.

    Prune features that are non zero in more samples than high or less
    documents than low, modifying the vocabulary, and restricting it to
    at most the limit most frequent.

    This does not prune samples with zero features.
    """
    if high is None and low is None and limit is None:
        return X, set()

    # Calculate a mask based on document frequencies
    dfs = X.map(_document_frequency).sum()
    tfs = X.map(lambda x: np.asarray(x.sum(axis=0))).sum().ravel()
    mask = np.ones(len(dfs), dtype=bool)
    if high is not None:
        mask &= dfs <= high
    if low is not None:
        mask &= dfs >= low
    if limit is not None and mask.sum() > limit:
        mask_inds = (-tfs[mask]).argsort()[:limit]
        new_mask = np.zeros(len(dfs), dtype=bool)
        new_mask[np.where(mask)[0][mask_inds]] = True
        mask = new_mask

    new_indices = np.cumsum(mask) - 1  # maps old indices to new
    removed_terms = set()
    for term, old_index in list(six.iteritems(vocabulary)):
        if mask[old_index]:
            vocabulary[term] = new_indices[old_index]
        else:
            del vocabulary[term]
            removed_terms.add(term)
    kept_indices = np.where(mask)[0]
    if len(kept_indices) == 0:
        raise ValueError("After pruning, no terms remain. Try a lower"
                         " min_df or a higher max_df.")
    return kept_indices, removed_terms
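A small NumPy-only illustration of how the document-frequency mask and index remapping above behave; the frequencies and thresholds are made up for the example.

import numpy as np

dfs = np.array([1, 2, 3, 4, 5])     # document frequency of 5 terms
mask = np.ones(len(dfs), dtype=bool)
mask &= dfs <= 3                    # high threshold
mask &= dfs >= 2                    # low threshold
# mask -> [False, True, True, False, False]: only terms seen in 2..3 docs kept
new_indices = np.cumsum(mask) - 1   # old index -> new column index for kept terms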
Learn the vocabulary dictionary and return term-document matrix.
def fit_transform(self, Z):
    """Learn the vocabulary dictionary and return term-document matrix.

    This is equivalent to fit followed by transform, but more efficiently
    implemented.

    Parameters
    ----------
    Z : iterable or DictRDD with column 'X'
        An iterable of raw_documents which yields either str, unicode or
        file objects; or a DictRDD with column 'X' containing such
        iterables.

    Returns
    -------
    X : array, [n_samples, n_features] or DictRDD
        Document-term matrix.
    """
    self._validate_vocabulary()

    # map analyzer and cache result
    analyze = self.build_analyzer()
    A = Z.transform(lambda X: list(map(analyze, X)), column='X').persist()

    # create vocabulary
    X = A[:, 'X'] if isinstance(A, DictRDD) else A
    self.vocabulary_ = self._init_vocab(X)

    # transform according to vocabulary
    mapper = self.broadcast(self._count_vocab, A.context)
    Z = A.transform(mapper, column='X', dtype=sp.spmatrix)

    if not self.fixed_vocabulary_:
        X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
        max_df = self.max_df
        min_df = self.min_df
        max_features = self.max_features

        # limit features according to min_df, max_df parameters
        n_doc = X.shape[0]
        max_doc_count = (max_df
                         if isinstance(max_df, numbers.Integral)
                         else max_df * n_doc)
        min_doc_count = (min_df
                         if isinstance(min_df, numbers.Integral)
                         else min_df * n_doc)
        if max_doc_count < min_doc_count:
            raise ValueError(
                "max_df corresponds to < documents than min_df")
        kept_indices, self.stop_words_ = self._limit_features(
            X, self.vocabulary_, max_doc_count, min_doc_count, max_features)

        # sort features
        map_index = self._sort_features(self.vocabulary_)

        # combined mask
        mask = kept_indices[map_index]

        Z = Z.transform(lambda x: x[:, mask], column='X', dtype=sp.spmatrix)

    A.unpersist()
    return Z
Transform documents to document-term matrix.
def transform(self, Z):
    """Transform documents to document-term matrix.

    Extract token counts out of raw text documents using the vocabulary
    fitted with fit or the one provided to the constructor.

    Parameters
    ----------
    raw_documents : iterable
        An iterable which yields either str, unicode or file objects.

    Returns
    -------
    X : sparse matrix, [n_samples, n_features]
        Document-term matrix.
    """
    if not hasattr(self, 'vocabulary_'):
        self._validate_vocabulary()

    self._check_vocabulary()

    analyze = self.build_analyzer()
    mapper = self.broadcast(self._count_vocab, Z.context)
    Z = Z.transform(lambda X: list(map(analyze, X)), column='X') \
         .transform(mapper, column='X', dtype=sp.spmatrix)
    return Z
Transform an ArrayRDD (or DictRDD with column 'X') containing a sequence of documents to a document-term matrix.
def transform(self, Z):
    """Transform an ArrayRDD (or DictRDD with column 'X') containing
    sequence of documents to a document-term matrix.

    Parameters
    ----------
    Z : ArrayRDD or DictRDD with raw text documents
        Samples. Each sample must be a text document (either bytes or
        unicode strings) which will be tokenized and hashed.

    Returns
    -------
    Z : SparseRDD/DictRDD containing scipy.sparse matrix
        Document-term matrix.
    """
    mapper = super(SparkHashingVectorizer, self).transform
    return Z.transform(mapper, column='X', dtype=sp.spmatrix)
Learn the idf vector (global term weights).
def fit(self, Z):
    """Learn the idf vector (global term weights)

    Parameters
    ----------
    Z : ArrayRDD or DictRDD containing (sparse matrices|ndarray)
        a matrix of term/token counts

    Returns
    -------
    self : TfidfVectorizer
    """
    X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
    check_rdd(X, (sp.spmatrix, np.ndarray))

    def mapper(X, use_idf=self.use_idf):
        if not sp.issparse(X):
            X = sp.csc_matrix(X)
        if use_idf:
            return _document_frequency(X)

    if self.use_idf:
        n_samples, n_features = X.shape
        df = X.map(mapper).treeReduce(operator.add)

        # perform idf smoothing if required
        df += int(self.smooth_idf)
        n_samples += int(self.smooth_idf)

        # log1p instead of log makes sure terms with zero idf don't get
        # suppressed entirely
        idf = np.log(float(n_samples) / df) + 1.0
        self._idf_diag = sp.spdiags(idf, diags=0,
                                    m=n_features, n=n_features)
    return self
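A quick numeric check of the smoothed idf formula used above, with made-up counts: 4 documents, a term appearing in 2 of them, and smooth_idf enabled.

import numpy as np

n_samples, df, smooth = 4, 2, 1
idf = np.log(float(n_samples + smooth) / (df + smooth)) + 1.0
# log(5/3) + 1 ~= 1.511; the +1 keeps terms that occur in every
# document from being zeroed out entirely.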
Compute the mean and std to be used for later scaling. Parameters: Z - DictRDD containing (X, y) pairs; X - training vector, {array-like, sparse matrix}, shape [n_samples, n_features], the data used to compute the mean and standard deviation used for later scaling along the features axis; y - target labels, passthrough for Pipeline compatibility.
def fit(self, Z):
    """Compute the mean and std to be used for later scaling.

    Parameters
    ----------
    Z : DictRDD containing (X, y) pairs
        X - Training vector.
            {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        y - Target labels
            Passthrough for ``Pipeline`` compatibility.
    """
    # Reset internal state before fitting
    self._reset()
    X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
    check_rdd(X, (np.ndarray, sp.spmatrix))

    def mapper(X):
        """Calculate statistics for every numpy or scipy block."""
        X = check_array(X, ('csr', 'csc'), dtype=np.float64)
        if hasattr(X, "toarray"):   # sparse matrix
            mean, var = mean_variance_axis(X, axis=0)
        else:
            mean, var = np.mean(X, axis=0), np.var(X, axis=0)
        return X.shape[0], mean, var

    def reducer(a, b):
        """Calculate the combined statistics."""
        n_a, mean_a, var_a = a
        n_b, mean_b, var_b = b
        n_ab = n_a + n_b
        mean_ab = ((mean_a * n_a) + (mean_b * n_b)) / n_ab
        var_ab = (((n_a * var_a) + (n_b * var_b)) / n_ab) + \
                 ((n_a * n_b) * ((mean_b - mean_a) / n_ab) ** 2)
        return (n_ab, mean_ab, var_ab)

    if check_rdd_dtype(X, (sp.spmatrix)):
        if self.with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` "
                "instead. See docstring for motivation and alternatives.")

    self.n_samples_seen_, self.mean_, self.var_ = \
        X.map(mapper).treeReduce(reducer)

    if self.with_std:
        self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
    else:
        self.scale_ = None

    return self
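The reducer above combines per-block counts, means and variances with the standard pooled-variance identity. A self-contained NumPy check (the data values are made up) that the combined statistics match a direct computation over the concatenated blocks:

import numpy as np

a = np.array([[1.0], [2.0], [3.0]])   # first block
b = np.array([[10.0], [20.0]])        # second block
n_a, mean_a, var_a = len(a), a.mean(axis=0), a.var(axis=0)
n_b, mean_b, var_b = len(b), b.mean(axis=0), b.var(axis=0)
n_ab = n_a + n_b
mean_ab = (mean_a * n_a + mean_b * n_b) / n_ab
var_ab = ((n_a * var_a + n_b * var_b) / n_ab
          + (n_a * n_b) * ((mean_b - mean_a) / n_ab) ** 2)
full = np.vstack([a, b])
assert np.allclose(mean_ab, full.mean(axis=0))
assert np.allclose(var_ab, full.var(axis=0))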
Perform standardization by centering and scaling. Parameters: Z - DictRDD containing (X, y) pairs (X - training vector, y - target labels). Returns: C - DictRDD containing (X, y) pairs (X - training vector, standardized; y - target labels).
def transform(self, Z):
    """Perform standardization by centering and scaling

    Parameters
    ----------
    Z : DictRDD containing (X, y) pairs
        X - Training vector
        y - Target labels

    Returns
    -------
    C : DictRDD containing (X, y) pairs
        X - Training vector standardized
        y - Target labels
    """
    X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
    check_rdd(X, (np.ndarray, sp.spmatrix))

    if check_rdd_dtype(X, (sp.spmatrix)):
        if self.with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` "
                "instead. See docstring for motivation and alternatives.")
        if self.scale_ is not None:
            def mapper(X):
                inplace_column_scale(X, 1 / self.scale_)
                return X
    else:
        if self.with_mean:
            if self.with_std:
                def mapper(X):
                    X -= self.mean_
                    X /= self.scale_
                    return X
            else:
                def mapper(X):
                    X -= self.mean_
                    return X
        else:
            if self.with_std:
                def mapper(X):
                    X /= self.scale_
                    return X
            else:
                raise ValueError("Need with_std or with_mean")

    return Z.transform(mapper, column="X")
Convert to equivalent StandardScaler
def to_scikit(self):
    """Convert to equivalent StandardScaler"""
    scaler = StandardScaler(with_mean=self.with_mean,
                            with_std=self.with_std,
                            copy=self.copy)
    scaler.__dict__ = self.__dict__
    return scaler
Wraps a scikit-learn linear model's fit method to use with RDD input.
def _spark_fit(self, cls, Z, *args, **kwargs):
    """Wraps a Scikit-learn Linear model's fit method to use with
    RDD input.

    Parameters
    ----------
    cls : class object
        The sklearn linear model's class to wrap.
    Z : TupleRDD or DictRDD
        The distributed train data in a DictRDD.

    Returns
    -------
    self: the wrapped class
    """
    mapper = lambda X_y: super(cls, self).fit(
        X_y[0], X_y[1], *args, **kwargs
    )
    models = Z.map(mapper)
    avg = models.reduce(operator.add) / models.count()
    self.__dict__.update(avg.__dict__)
    return self
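The method above reduces the block-fitted models with operator.add and divides by the block count, so the wrapped estimator only needs meaningful addition and division semantics. A toy, self-contained illustration of that averaging pattern (the ToyModel class is a stand-in, not sparkit-learn's real operator support on estimators):

import functools
import operator

class ToyModel:
    """Minimal model whose coefficients support the add/divide protocol."""
    def __init__(self, coef):
        self.coef_ = coef
    def __add__(self, other):
        return ToyModel(self.coef_ + other.coef_)
    def __truediv__(self, k):
        return ToyModel(self.coef_ / k)

models = [ToyModel(2.0), ToyModel(4.0), ToyModel(6.0)]  # one per data block
avg = functools.reduce(operator.add, models) / len(models)
# avg.coef_ == 4.0: the per-block coefficients are simply averaged.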
Wraps a scikit-learn linear model's predict method to use with RDD input.
def _spark_predict(self, cls, X, *args, **kwargs):
    """Wraps a Scikit-learn Linear model's predict method to use with
    RDD input.

    Parameters
    ----------
    cls : class object
        The sklearn linear model's class to wrap.
    Z : ArrayRDD
        The distributed data to predict in a DictRDD.

    Returns
    -------
    self: the wrapped class
    """
    return X.map(lambda X: super(cls, self).predict(X, *args, **kwargs))
Fit linear model.
def fit(self, Z):
    """Fit linear model.

    Parameters
    ----------
    Z : DictRDD with (X, y) values
        X containing numpy array or sparse matrix - The training data
        y containing the target values

    Returns
    -------
    self : returns an instance of self.
    """
    check_rdd(Z, {'X': (sp.spmatrix, np.ndarray)})
    return self._spark_fit(SparkLinearRegression, Z)
Fit all the transforms one after the other and transform the data, then fit the transformed data using the final estimator.
def fit(self, Z, **fit_params):
    """Fit all the transforms one after the other and transform the
    data, then fit the transformed data using the final estimator.

    Parameters
    ----------
    Z : ArrayRDD, TupleRDD or DictRDD
        Input data in blocked distributed format.

    Returns
    -------
    self : SparkPipeline
    """
    Zt, fit_params = self._pre_transform(Z, **fit_params)
    self.steps[-1][-1].fit(Zt, **fit_params)
    Zt.unpersist()
    return self
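As a rough usage sketch only: the module paths, class names and step names below are assumptions based on sparkit-learn's naming, Z is assumed to already be a DictRDD of (X, y) blocks (construction omitted), and a running SparkContext is required.

from splearn.feature_extraction.text import SparkCountVectorizer
from splearn.naive_bayes import SparkMultinomialNB
from splearn.pipeline import SparkPipeline

# Chain a distributed vectorizer with a distributed Naive Bayes estimator;
# each step transforms Z, the final step is fitted on the transformed data.
pipeline = SparkPipeline([('vect', SparkCountVectorizer()),
                          ('clf', SparkMultinomialNB())])
pipeline.fit(Z)
predictions = pipeline.predict(Z[:, 'X'])  # assumption: predict on the 'X' column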
Fit all the transforms one after the other and transform the data, then use fit_transform on the transformed data using the final estimator.
def fit_transform(self, Z, **fit_params):
    """Fit all the transforms one after the other and transform the
    data, then use fit_transform on transformed data using the final
    estimator."""
    Zt, fit_params = self._pre_transform(Z, **fit_params)
    if hasattr(self.steps[-1][-1], 'fit_transform'):
        return self.steps[-1][-1].fit_transform(Zt, **fit_params)
    else:
        return self.steps[-1][-1].fit(Zt, **fit_params).transform(Zt)
Applies transforms to the data, and the score method of the final estimator. Valid only if the final estimator implements score.
def score(self, Z):
    """Applies transforms to the data, and the score method of the
    final estimator. Valid only if the final estimator implements
    score."""
    Zt = Z
    for name, transform in self.steps[:-1]:
        Zt = transform.transform(Zt)
    return self.steps[-1][-1].score(Zt)
TODO: rewrite docstring. Fit all transformers using X. Parameters: X - array-like or sparse matrix, shape (n_samples, n_features), input data used to fit transformers.
def fit(self, Z, **fit_params):
    """TODO: rewrite docstring
    Fit all transformers using X.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data, used to fit transformers.
    """
    fit_params_steps = dict((step, {})
                            for step, _ in self.transformer_list)
    for pname, pval in six.iteritems(fit_params):
        step, param = pname.split('__', 1)
        fit_params_steps[step][param] = pval
    transformers = Parallel(n_jobs=self.n_jobs, backend="threading")(
        delayed(_fit_one_transformer)(trans, Z, **fit_params_steps[name])
        for name, trans in self.transformer_list)
    self._update_transformer_list(transformers)
    return self