
Derived Mesh Docstrings

TexturedPhotogrammetryMeshChunked

Bases: TexturedPhotogrammetryMesh

Extends the TexturedPhotogrammetry mesh by allowing chunked operations for large meshes

Source code in geograypher/meshes/derived_meshes.py
class TexturedPhotogrammetryMeshChunked(TexturedPhotogrammetryMesh):
    """Extends the TexturedPhotogrammtery mesh by allowing chunked operations for large meshes"""

    def get_mesh_chunks_for_cameras(
        self,
        cameras: typing.Union[PhotogrammetryCamera, PhotogrammetryCameraSet],
        n_clusters: int = 8,
        buffer_dist_meters: float = CHUNKED_MESH_BUFFER_DIST_METERS,
        vis_clusters: bool = False,
        include_texture: bool = False,
    ):
        """Return a generator of sub-meshes, chunked to align with clusters of cameras

        Args:
            cameras (typing.Union[PhotogrammetryCamera, PhotogrammetryCameraSet]):
                The chunks of the mesh are generated by clustering the cameras
            n_clusters (int, optional):
                The mesh is broken up into this many clusters. Defaults to 8.
            buffer_dist_meters (float, optional):
                Each cluster contains the mesh that is within this distance in meters of the camera
                locations. Defaults to 50.
            vis_clusters (bool, optional):
                Should the location of the cameras and resultant clusters be shown. Defaults to False.
            include_texture (bool, optional): Should the texture from the full mesh be included
                in the subset mesh. Defaults to False.

        Yields:
            TexturedPhotogrammetryMesh: The subset mesh
            PhotogrammetryCameraSet: The cameras associated with that mesh
            np.ndarray: The IDs of the faces in the original mesh used to generate the sub mesh

        """
        # Extract the points depending on whether it's a single camera or a set
        if isinstance(cameras, PhotogrammetryCamera):
            camera_points = [Point(*cameras.get_lon_lat())]
        else:
            # Get the (lon, lat) coordinates for each camera and turn them into shapely Points
            camera_points = [
                Point(*lon_lat) for lon_lat in cameras.get_lon_lat_coords()
            ]

        # Create a geodataframe from the points
        camera_points = gpd.GeoDataFrame(
            geometry=camera_points, crs=pyproj.CRS.from_epsg("4326")
        )
        # Make sure the gdf has a projected CRS so there is no warping of the space
        camera_points = ensure_projected_CRS(camera_points)
        # Extract the x, y points, now in a projected CRS
        camera_points_numpy = np.stack(
            camera_points.geometry.apply(lambda point: (point.x, point.y))
        )

        # Assign each camera to a cluster
        camera_cluster_IDs = KMeans(n_clusters=n_clusters).fit_predict(
            camera_points_numpy
        )
        if vis_clusters:
            # Show the camera locations, colored by which one they were assigned to
            plt.scatter(
                camera_points_numpy[:, 0],
                camera_points_numpy[:, 1],
                c=camera_cluster_IDs,
                cmap="tab20",
            )
            plt.show()

        # Get the texture from the full mesh
        full_mesh_texture = (
            self.get_texture(request_vertex_texture=False) if include_texture else None
        )

        # Iterate over the clusters of cameras
        for cluster_ID in tqdm(range(n_clusters), desc="Chunks in mesh"):
            # Get indices of cameras for that cluster
            matching_camera_inds = np.where(cluster_ID == camera_cluster_IDs)[0]
            # Get the segmentor camera set for the subset of the camera inds
            sub_camera_set = cameras.get_subset_cameras(matching_camera_inds)
            # Extract the rows in the dataframe for those IDs
            subset_camera_points = camera_points.iloc[matching_camera_inds]

            # TODO this could be accelerated by computing the membership for all points at the beginning.
            # This would require first computing all the (potentially-overlapping) ROIs for each region. Then, a
            # non-overlapping partition could be found where each polygon corresponds to a set of ROIs. Then the
            # membership for each vertex could be found per polygon, and from that the membership in each ROI.
            # This should be benchmarked, though, because having more polygons than original ROIs may actually
            # lead to slower computation than doing it sequentially

            # Extract a sub mesh for a region around the camera points and also retain the indices into the original mesh
            sub_mesh_pv, _, face_IDs = self.select_mesh_ROI(
                region_of_interest=subset_camera_points,
                buffer_meters=buffer_dist_meters,
                return_original_IDs=True,
            )
            # Extract the corresponding texture elements for this sub mesh if needed
            # If include_texture=False, the full_mesh_texture will not be set
            # If there is no mesh, the texture should also be set to None, otherwise it will be
            # ambiguous whether it's a face or vertex texture
            sub_mesh_texture = (
                full_mesh_texture[face_IDs]
                if full_mesh_texture is not None and len(face_IDs) > 0
                else None
            )

            # Wrap this pyvista mesh in a photogrammetry mesh
            sub_mesh_TPM = TexturedPhotogrammetryMesh(
                sub_mesh_pv, texture=sub_mesh_texture
            )

            # Return the submesh as a Textured Photogrammetry Mesh, the subset of cameras, and the
            # face IDs mapping the faces in the sub mesh back to the full one
            yield sub_mesh_TPM, sub_camera_set, face_IDs

    def render_flat(
        self,
        cameras: typing.Union[PhotogrammetryCamera, PhotogrammetryCameraSet],
        batch_size: int = 1,
        render_img_scale: float = 1,
        n_clusters: int = 8,
        buffer_dist_meters: float = CHUNKED_MESH_BUFFER_DIST_METERS,
        vis_clusters: bool = False,
        **pix2face_kwargs,
    ):
        """
        Render the texture from the viewpoint of each camera in cameras. Note that this is a
        generator so if you want to actually execute the computation, call list() on the output.
        This version first clusters the cameras, extracts a region of the mesh surrounding each
        cluster of cameras, and then performs rendering on each sub-region.

        Args:
            cameras (typing.Union[PhotogrammetryCamera, PhotogrammetryCameraSet]):
                Either a single camera or a camera set. The texture will be rendered from the
                perspective of each one
            batch_size (int, optional):
                The batch size for pix2face. Defaults to 1.
            render_img_scale (float, optional):
                The rendered image will be this fraction of the original image corresponding to the
                virtual camera. Defaults to 1.
            n_clusters (int, optional):
                Number of clusters to break the cameras into. Defaults to 8.
            buffer_dist_meters (float, optional):
                How far around the cameras to include the mesh. Defaults to 50.
            vis_clusters (bool, optional):
                Should the clusters of camera locations be shown. Defaults to False.

        Raises:
            TypeError: If cameras is not the correct type

        Yields:
            np.ndarray:
               The pix2face array for the next camera. The shape is
               (int(img_h*render_img_scale), int(img_w*render_img_scale)).
        """
        # Create a generator of chunked meshes based on clusters of cameras
        chunk_gen = self.get_mesh_chunks_for_cameras(
            cameras,
            n_clusters=n_clusters,
            buffer_dist_meters=buffer_dist_meters,
            vis_clusters=vis_clusters,
            include_texture=True,
        )

        for sub_mesh_TPM, sub_camera_set, _ in tqdm(
            chunk_gen, total=n_clusters, desc="Rendering by chunks"
        ):
            # Create the render generator
            render_gen = sub_mesh_TPM.render_flat(
                sub_camera_set,
                batch_size=batch_size,
                render_img_scale=render_img_scale,
                **pix2face_kwargs,
            )
            # Yield items from the returned generator
            for render_item in render_gen:
                yield render_item

    def aggregate_projected_images(
        self,
        cameras: typing.Union[PhotogrammetryCamera, PhotogrammetryCameraSet],
        batch_size: int = 1,
        aggregate_img_scale: float = 1,
        n_clusters: int = 8,
        buffer_dist_meters: float = CHUNKED_MESH_BUFFER_DIST_METERS,
        vis_clusters: bool = False,
        **kwargs,
    ):
        """
        Aggregate the imagery from multiple cameras into per-face averages. This version chunks the
        mesh up and performs aggregation on sub-regions to decrease the runtime.

        Args:
            cameras (typing.Union[PhotogrammetryCamera, PhotogrammetryCameraSet]):
                The cameras to aggregate the images from. cam.get_image() will be called on each
                element.
            batch_size (int, optional):
                The number of cameras to compute correspondences for at once. Defaults to 1.
            aggregate_img_scale (float, optional):
                The scale of pixel-to-face correspondences image, as a fraction of the original
                image. Lower values lead to better runtimes but decreased precision at content
                boundaries in the images. Defaults to 1.
            n_clusters (int, optional):
                The mesh is broken up into this many clusters. Defaults to 8.
            buffer_dist_meters (float, optional):
                Each cluster contains the mesh that is within this distance in meters of the camera
                locations. Defaults to 250.
            vis_clusters (bool, optional):
                Should the location of the cameras and resultant clusters be shown. Defaults to False.

        Returns:
            np.ndarray: (n_faces, n_image_channels) The average projected image per face
            dict: Additional information, including the summed projections, observations per face,
                  and potentially each individual projection
        """

        # Initialize the values that will be incremented per cluster
        summed_projections = np.zeros(
            (self.pyvista_mesh.n_faces, cameras.n_image_channels()), dtype=float
        )
        projection_counts = np.zeros(self.pyvista_mesh.n_faces, dtype=int)

        # Create a generator of chunked meshes
        chunk_gen = self.get_mesh_chunks_for_cameras(
            cameras,
            n_clusters=n_clusters,
            buffer_dist_meters=buffer_dist_meters,
            vis_clusters=vis_clusters,
        )

        # Iterate over chunks in the mesh
        for sub_mesh_TPM, sub_camera_set, face_IDs in chunk_gen:
            # This means there was no mesh for these cameras
            if len(face_IDs) == 0:
                continue

            # Aggregate the projections from the set of cameras corresponding to this chunk
            _, additional_information_submesh = sub_mesh_TPM.aggregate_projected_images(
                sub_camera_set,
                batch_size=batch_size,
                aggregate_img_scale=aggregate_img_scale,
                return_all=False,
                **kwargs,
            )

            # Increment the summed predictions and counts
            # Make sure that nans don't propagate, since they should just be treated as zeros
            # TODO ensure this is correct
            summed_projections[face_IDs] = np.nansum(
                [
                    summed_projections[face_IDs],
                    additional_information_submesh["summed_projections"],
                ],
                axis=0,
            )
            projection_counts[face_IDs] = (
                projection_counts[face_IDs]
                + additional_information_submesh["projection_counts"]
            )

        # Same as the parent class
        no_projections = projection_counts == 0
        summed_projections[no_projections] = np.nan

        additional_information = {
            "projection_counts": projection_counts,
            "summed_projections": summed_projections,
        }

        average_projections = np.divide(
            summed_projections, np.expand_dims(projection_counts, 1)
        )

        return average_projections, additional_information

    def label_polygons(
        self,
        face_labels: np.ndarray,
        polygons: typing.Union[PATH_TYPE, gpd.GeoDataFrame],
        face_weighting: typing.Union[None, np.ndarray] = None,
        sjoin_overlay: bool = True,
        return_class_labels: bool = True,
        unknown_class_label: str = "unknown",
        buffer_dist_meters: float = 2,
        n_polygons_per_cluster: int = 1000,
    ):
        """
        Assign a class label to polygons using labels per face. This implementation is useful for
        large numbers of polygons. To make the expensive sjoin/overlay more efficient, this
        implementation first clusters the polygons and labels each cluster independently. This makes
        use of the fact that the mesh faces around each cluster can be extracted relatively quickly.
        Then the sjoin/overlay is computed with substantially fewer polygons and faces, leading to
        better performance.

        Args:
            face_labels (np.ndarray): (n_faces,) array of integer labels
            polygons (typing.Union[PATH_TYPE, gpd.GeoDataFrame]): Geospatial polygons to be labeled
            face_weighting (typing.Union[None, np.ndarray], optional):
                (n_faces,) array of scalar weights for each face, to be multiplied with the
                contribution of this face. Defaults to None.
            sjoin_overlay (bool, optional):
                Whether to use `gpd.sjoin` or `gpd.overlay` to compute the overlay. Sjoin is
                substantially faster, but only uses mesh faces that are entirely within the bounds
                of the polygon, rather than computing the intersecting region for
                partially-overlapping faces. Defaults to True.
            return_class_labels (bool, optional):
                Return string representation of class labels rather than float. Defaults to True.
            unknown_class_label (str, optional):
                Label for predicted class for polygons with no overlapping faces. Defaults to "unknown".
            buffer_dist_meters (Union[float, None], optional):
                Only applicable if sjoin_overlay=False. In that case, include faces entirely within
                the region that is this distance in meters from the polygons. Defaults to 2.0.
            n_polygons_per_cluster (int, optional):
                Set the number of clusters so there are approximately this many polygons per
                cluster on average. Defaults to 1000.

        Raises:
            ValueError: If face_labels or face_weighting is not 1D

        Returns:
            list(typing.Union[str, int]):
                (n_polygons,) list of labels. Either float values, representing integer IDs or nan,
                or string values representing the class label
        """
        # Load in the polygons
        polygons_gdf = ensure_projected_CRS(coerce_to_geoframe(polygons))
        # Extract the centroid of each one and convert to a numpy array
        centroids_xy = np.stack(
            polygons_gdf.centroid.apply(lambda point: (point.x, point.y))
        )
        # Determine how many clusters there should be
        n_clusters = int(np.ceil(len(polygons_gdf) / n_polygons_per_cluster))
        # Assign each polygon to a cluster
        polygon_cluster_IDs = KMeans(n_clusters=n_clusters).fit_predict(centroids_xy)

        # This will be set later once we figure out the datatype of the per-cluster labels
        all_labels = None

        # Loop over the individual clusters
        for cluster_ID in tqdm(range(n_clusters), desc="Clusters of polygons"):
            # Determine which polygons are part of that cluster
            cluster_mask = polygon_cluster_IDs == cluster_ID
            # Extract the polygons from one cluster
            cluster_polygons = polygons_gdf.iloc[cluster_mask]
            # Compute the labeling per polygon
            cluster_labels = super().label_polygons(
                face_labels,
                cluster_polygons,
                face_weighting,
                sjoin_overlay,
                return_class_labels,
                unknown_class_label,
                buffer_dist_meters,
            )
            # Convert to numpy array
            cluster_labels = np.array(cluster_labels)
            # Create the aggregation array with the appropriate datatype
            if all_labels is None:
                # We assume that this array will have at least one element since each cluster
                # should be non-empty. All values should be overwritten, so the default value doesn't matter
                all_labels = np.zeros(len(polygons_gdf), dtype=cluster_labels.dtype)

            # Set the appropriate elements of the full array with the newly-computed cluster labels
            all_labels[cluster_mask] = cluster_labels

        # The output is expected to be a list
        all_labels = all_labels.tolist()
        return all_labels
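
Example usage

The snippet below is a minimal sketch of how this class is typically driven, based only on the
methods documented on this page. The mesh path and the pre-built camera set are illustrative
assumptions (the source above constructs the parent class from a pyvista mesh; whether a file
path is also accepted is an assumption here).

from geograypher.meshes.derived_meshes import TexturedPhotogrammetryMeshChunked

# Assumed inputs: a mesh file on disk and an already-constructed
# PhotogrammetryCameraSet named `cameras`
chunked_mesh = TexturedPhotogrammetryMeshChunked("mesh.ply")

# Same interface as the parent class, but the mesh is processed in
# camera-cluster chunks to bound the memory used by each step
average_projections, info = chunked_mesh.aggregate_projected_images(
    cameras, n_clusters=8
)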

Functions

aggregate_projected_images(cameras, batch_size=1, aggregate_img_scale=1, n_clusters=8, buffer_dist_meters=CHUNKED_MESH_BUFFER_DIST_METERS, vis_clusters=False, **kwargs)

Aggregate the imagery from multiple cameras into per-face averages. This version chunks the mesh up and performs aggregation on sub-regions to decrease the runtime.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `cameras` | `Union[PhotogrammetryCamera, PhotogrammetryCameraSet]` | The cameras to aggregate the images from. `cam.get_image()` will be called on each element. | required |
| `batch_size` | `int` | The number of cameras to compute correspondences for at once. Defaults to 1. | `1` |
| `aggregate_img_scale` | `float` | The scale of the pixel-to-face correspondences image, as a fraction of the original image. Lower values lead to better runtimes but decreased precision at content boundaries in the images. Defaults to 1. | `1` |
| `n_clusters` | `int` | The mesh is broken up into this many clusters. Defaults to 8. | `8` |
| `buffer_dist_meters` | `float` | Each cluster contains the mesh that is within this distance in meters of the camera locations. Defaults to 250. | `CHUNKED_MESH_BUFFER_DIST_METERS` |
| `vis_clusters` | `bool` | Should the location of the cameras and resultant clusters be shown. Defaults to False. | `False` |

Returns:

| Type | Description |
| --- | --- |
| `np.ndarray` | (n_faces, n_image_channels) The average projected image per face |
| `dict` | Additional information, including the summed projections, observations per face, and potentially each individual projection |
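
The final averaging is simple elementwise arithmetic over the accumulators built up across chunks;
the standalone sketch below, with toy numbers, mirrors the logic at the end of the source listing
that follows.

import numpy as np

# Toy accumulators: per-face channel sums and per-face observation counts
summed_projections = np.array([[3.0, 6.0], [0.0, 0.0], [2.0, 4.0]])
projection_counts = np.array([3, 0, 2])

# Faces never observed by any camera become NaN rather than dividing by zero
summed_projections[projection_counts == 0] = np.nan
average_projections = summed_projections / projection_counts[:, None]
# -> [[ 1.,  2.], [nan, nan], [ 1.,  2.]]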

Source code in geograypher/meshes/derived_meshes.py
def aggregate_projected_images(
    self,
    cameras: typing.Union[PhotogrammetryCamera, PhotogrammetryCameraSet],
    batch_size: int = 1,
    aggregate_img_scale: float = 1,
    n_clusters: int = 8,
    buffer_dist_meters: float = CHUNKED_MESH_BUFFER_DIST_METERS,
    vis_clusters: bool = False,
    **kwargs,
):
    """
    Aggregate the imagery from multiple cameras into per-face averages. This version chunks the
    mesh up and performs aggregation on sub-regions to decrease the runtime.

    Args:
        cameras (typing.Union[PhotogrammetryCamera, PhotogrammetryCameraSet]):
            The cameras to aggregate the images from. cam.get_image() will be called on each
            element.
        batch_size (int, optional):
            The number of cameras to compute correspondences for at once. Defaults to 1.
        aggregate_img_scale (float, optional):
            The scale of pixel-to-face correspondences image, as a fraction of the original
            image. Lower values lead to better runtimes but decreased precision at content
            boundaries in the images. Defaults to 1.
        n_clusters (int, optional):
            The mesh is broken up into this many clusters. Defaults to 8.
        buffer_dist_meters (float, optional):
            Each cluster contains the mesh that is within this distance in meters of the camera
            locations. Defaults to 250.
        vis_clusters (bool, optional):
            Should the location of the cameras and resultant clusters be shown. Defaults to False.

    Returns:
        np.ndarray: (n_faces, n_image_channels) The average projected image per face
        dict: Additional information, including the summed projections, observations per face,
              and potentially each individual projection
    """

    # Initialize the values that will be incremented per cluster
    summed_projections = np.zeros(
        (self.pyvista_mesh.n_faces, cameras.n_image_channels()), dtype=float
    )
    projection_counts = np.zeros(self.pyvista_mesh.n_faces, dtype=int)

    # Create a generator of chunked meshes
    chunk_gen = self.get_mesh_chunks_for_cameras(
        cameras,
        n_clusters=n_clusters,
        buffer_dist_meters=buffer_dist_meters,
        vis_clusters=vis_clusters,
    )

    # Iterate over chunks in the mesh
    for sub_mesh_TPM, sub_camera_set, face_IDs in chunk_gen:
        # This means there was no mesh for these cameras
        if len(face_IDs) == 0:
            continue

        # Aggregate the projections from the set of cameras corresponding to this chunk
        _, additional_information_submesh = sub_mesh_TPM.aggregate_projected_images(
            sub_camera_set,
            batch_size=batch_size,
            aggregate_img_scale=aggregate_img_scale,
            return_all=False,
            **kwargs,
        )

        # Increment the summed predictions and counts
        # Make sure that nans don't propagate, since they should just be treated as zeros
        # TODO ensure this is correct
        summed_projections[face_IDs] = np.nansum(
            [
                summed_projections[face_IDs],
                additional_information_submesh["summed_projections"],
            ],
            axis=0,
        )
        projection_counts[face_IDs] = (
            projection_counts[face_IDs]
            + additional_information_submesh["projection_counts"]
        )

    # Same as the parent class
    no_projections = projection_counts == 0
    summed_projections[no_projections] = np.nan

    additional_information = {
        "projection_counts": projection_counts,
        "summed_projections": summed_projections,
    }

    average_projections = np.divide(
        summed_projections, np.expand_dims(projection_counts, 1)
    )

    return average_projections, additional_information

get_mesh_chunks_for_cameras(cameras, n_clusters=8, buffer_dist_meters=CHUNKED_MESH_BUFFER_DIST_METERS, vis_clusters=False, include_texture=False)

Return a generator of sub-meshes, chunked to align with clusters of cameras

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `cameras` | `Union[PhotogrammetryCamera, PhotogrammetryCameraSet]` | The chunks of the mesh are generated by clustering the cameras | required |
| `n_clusters` | `int` | The mesh is broken up into this many clusters. Defaults to 8. | `8` |
| `buffer_dist_meters` | `float` | Each cluster contains the mesh that is within this distance in meters of the camera locations. Defaults to 50. | `CHUNKED_MESH_BUFFER_DIST_METERS` |
| `vis_clusters` | `bool` | Should the location of the cameras and resultant clusters be shown. Defaults to False. | `False` |
| `include_texture` | `bool` | Should the texture from the full mesh be included in the subset mesh. Defaults to False. | `False` |

Yields:

| Type | Description |
| --- | --- |
| `TexturedPhotogrammetryMesh` | The subset mesh |
| `PhotogrammetryCameraSet` | The cameras associated with that mesh |
| `np.ndarray` | The IDs of the faces in the original mesh used to generate the sub mesh |
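
A sketch of consuming this generator directly; `chunked_mesh` (a TexturedPhotogrammetryMeshChunked)
and `cameras` (a PhotogrammetryCameraSet) are assumed to already exist.

for sub_mesh_TPM, sub_camera_set, face_IDs in chunked_mesh.get_mesh_chunks_for_cameras(
    cameras, n_clusters=4, buffer_dist_meters=100
):
    if len(face_IDs) == 0:
        # No mesh fell within the buffer around this cluster of cameras
        continue
    # face_IDs indexes into the faces of the full mesh, so per-face results
    # computed on the sub mesh can be scattered back into full-mesh arrays
    print(f"processing a chunk with {len(face_IDs)} faces")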

Source code in geograypher/meshes/derived_meshes.py
def get_mesh_chunks_for_cameras(
    self,
    cameras: typing.Union[PhotogrammetryCamera, PhotogrammetryCameraSet],
    n_clusters: int = 8,
    buffer_dist_meters: float = CHUNKED_MESH_BUFFER_DIST_METERS,
    vis_clusters: bool = False,
    include_texture: bool = False,
):
    """Return a generator of sub-meshes, chunked to align with clusters of cameras

    Args:
        cameras (typing.Union[PhotogrammetryCamera, PhotogrammetryCameraSet]):
            The chunks of the mesh are generated by clustering the cameras
        n_clusters (int, optional):
            The mesh is broken up into this many clusters. Defaults to 8.
        buffer_dist_meters (float, optional):
            Each cluster contains the mesh that is within this distance in meters of the camera
            locations. Defaults to 50.
        vis_clusters (bool, optional):
            Should the location of the cameras and resultant clusters be shown. Defaults to False.
        include_texture (bool, optional): Should the texture from the full mesh be included
            in the subset mesh. Defaults to False.

    Yields:
        TexturedPhotogrammetryMesh: The subset mesh
        PhotogrammetryCameraSet: The cameras associated with that mesh
        np.ndarray: The IDs of the faces in the original mesh used to generate the sub mesh

    """
    # Extract the points depending on whether it's a single camera or a set
    if isinstance(cameras, PhotogrammetryCamera):
        camera_points = [Point(*cameras.get_lon_lat())]
    else:
        # Get the (lon, lat) coordinates for each camera and turn them into shapely Points
        camera_points = [
            Point(*lon_lat) for lon_lat in cameras.get_lon_lat_coords()
        ]

    # Create a geodataframe from the points
    camera_points = gpd.GeoDataFrame(
        geometry=camera_points, crs=pyproj.CRS.from_epsg("4326")
    )
    # Make sure the gdf has a projected CRS so there is no warping of the space
    camera_points = ensure_projected_CRS(camera_points)
    # Extract the x, y points, now in a projected CRS
    camera_points_numpy = np.stack(
        camera_points.geometry.apply(lambda point: (point.x, point.y))
    )

    # Assign each camera to a cluster
    camera_cluster_IDs = KMeans(n_clusters=n_clusters).fit_predict(
        camera_points_numpy
    )
    if vis_clusters:
        # Show the camera locations, colored by which one they were assigned to
        plt.scatter(
            camera_points_numpy[:, 0],
            camera_points_numpy[:, 1],
            c=camera_cluster_IDs,
            cmap="tab20",
        )
        plt.show()

    # Get the texture from the full mesh
    full_mesh_texture = (
        self.get_texture(request_vertex_texture=False) if include_texture else None
    )

    # Iterate over the clusters of cameras
    for cluster_ID in tqdm(range(n_clusters), desc="Chunks in mesh"):
        # Get indices of cameras for that cluster
        matching_camera_inds = np.where(cluster_ID == camera_cluster_IDs)[0]
        # Get the segmentor camera set for the subset of the camera inds
        sub_camera_set = cameras.get_subset_cameras(matching_camera_inds)
        # Extract the rows in the dataframe for those IDs
        subset_camera_points = camera_points.iloc[matching_camera_inds]

        # TODO this could be accelerated by computing the membership for all points at the beginning.
        # This would require first computing all the (potentially-overlapping) ROIs for each region. Then, a
        # non-overlapping partition could be found where each polygon corresponds to a set of ROIs. Then the
        # membership for each vertex could be found per polygon, and from that the membership in each ROI.
        # This should be benchmarked, though, because having more polygons than original ROIs may actually
        # lead to slower computation than doing it sequentially

        # Extract a sub mesh for a region around the camera points and also retain the indices into the original mesh
        sub_mesh_pv, _, face_IDs = self.select_mesh_ROI(
            region_of_interest=subset_camera_points,
            buffer_meters=buffer_dist_meters,
            return_original_IDs=True,
        )
        # Extract the corresponding texture elements for this sub mesh if needed
        # If include_texture=False, the full_mesh_texture will not be set
        # If there is no mesh, the texture should also be set to None, otherwise it will be
        # ambiguous whether it's a face or vertex texture
        sub_mesh_texture = (
            full_mesh_texture[face_IDs]
            if full_mesh_texture is not None and len(face_IDs) > 0
            else None
        )

        # Wrap this pyvista mesh in a photogrammetry mesh
        sub_mesh_TPM = TexturedPhotogrammetryMesh(
            sub_mesh_pv, texture=sub_mesh_texture
        )

        # Return the submesh as a Textured Photogrammetry Mesh, the subset of cameras, and the
        # face IDs mapping the faces in the sub mesh back to the full one
        yield sub_mesh_TPM, sub_camera_set, face_IDs

label_polygons(face_labels, polygons, face_weighting=None, sjoin_overlay=True, return_class_labels=True, unknown_class_label='unknown', buffer_dist_meters=2, n_polygons_per_cluster=1000)

Assign a class label to polygons using labels per face. This implementation is useful for large numbers of polygons. To make the expensive sjoin/overlay more efficient, this implementation first clusters the polygons and labels each cluster independently. This makes use of the fact that the mesh faces around each cluster can be extracted relatively quickly. Then the sjoin/overlay is computed with substantially fewer polygons and faces, leading to better performance.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `face_labels` | `ndarray` | (n_faces,) array of integer labels | required |
| `polygons` | `Union[PATH_TYPE, GeoDataFrame]` | Geospatial polygons to be labeled | required |
| `face_weighting` | `Union[None, ndarray]` | (n_faces,) array of scalar weights for each face, to be multiplied with the contribution of this face. Defaults to None. | `None` |
| `sjoin_overlay` | `bool` | Whether to use `gpd.sjoin` or `gpd.overlay` to compute the overlay. Sjoin is substantially faster, but only uses mesh faces that are entirely within the bounds of the polygon, rather than computing the intersecting region for partially-overlapping faces. Defaults to True. | `True` |
| `return_class_labels` | `bool` | Return string representation of class labels rather than float. Defaults to True. | `True` |
| `unknown_class_label` | `str` | Label for the predicted class of polygons with no overlapping faces. Defaults to "unknown". | `'unknown'` |
| `buffer_dist_meters` | `Union[float, None]` | Only applicable if sjoin_overlay=False. In that case, include faces entirely within the region that is this distance in meters from the polygons. Defaults to 2.0. | `2` |
| `n_polygons_per_cluster` | `int` | Set the number of clusters so there are approximately this many polygons per cluster on average. Defaults to 1000. | `1000` |

Raises:

| Type | Description |
| --- | --- |
| `ValueError` | If face_labels or face_weighting is not 1D |

Returns:

| Type | Description |
| --- | --- |
| `list(Union[str, int])` | (n_polygons,) list of labels. Either float values, representing integer IDs or nan, or string values representing the class label |
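
A sketch of a typical call; `face_labels` would normally come from an aggregation step, and the
GeoJSON path is a placeholder for any polygon input the method can coerce to a GeoDataFrame.

import numpy as np

# Placeholder per-face integer class IDs, one label per mesh face
face_labels = np.zeros(chunked_mesh.pyvista_mesh.n_faces, dtype=int)

polygon_labels = chunked_mesh.label_polygons(
    face_labels,
    "field_polygons.geojson",    # placeholder path to geospatial polygons
    n_polygons_per_cluster=500,  # smaller clusters: more, but cheaper, sjoin/overlay calls
)
# polygon_labels is a list with one entry per input polygon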

Source code in geograypher/meshes/derived_meshes.py
def label_polygons(
    self,
    face_labels: np.ndarray,
    polygons: typing.Union[PATH_TYPE, gpd.GeoDataFrame],
    face_weighting: typing.Union[None, np.ndarray] = None,
    sjoin_overlay: bool = True,
    return_class_labels: bool = True,
    unknown_class_label: str = "unknown",
    buffer_dist_meters: float = 2,
    n_polygons_per_cluster: int = 1000,
):
    """
    Assign a class label to polygons using labels per face. This implementation is useful for
    large numbers of polygons. To make the expensive sjoin/overlay more efficient, this
    implementation first clusters the polygons and labels each cluster independently. This makes
    use of the fact that the mesh faces around each cluster can be extracted relatively quickly.
    Then the sjoin/overlay is computed with substantially fewer polygons and faces, leading to
    better performance.

    Args:
        face_labels (np.ndarray): (n_faces,) array of integer labels
        polygons (typing.Union[PATH_TYPE, gpd.GeoDataFrame]): Geospatial polygons to be labeled
        face_weighting (typing.Union[None, np.ndarray], optional):
            (n_faces,) array of scalar weights for each face, to be multiplied with the
            contribution of this face. Defaults to None.
        sjoin_overlay (bool, optional):
            Whether to use `gpd.sjoin` or `gpd.overlay` to compute the overlay. Sjoin is
            substantially faster, but only uses mesh faces that are entirely within the bounds
            of the polygon, rather than computing the intersecting region for
            partially-overlapping faces. Defaults to True.
        return_class_labels (bool, optional):
            Return string representation of class labels rather than float. Defaults to True.
        unknown_class_label (str, optional):
            Label for predicted class for polygons with no overlapping faces. Defaults to "unknown".
        buffer_dist_meters (Union[float, None], optional):
            Only applicable if sjoin_overlay=False. In that case, include faces entirely within
            the region that is this distance in meters from the polygons. Defaults to 2.0.
        n_polygons_per_cluster (int, optional):
            Set the number of clusters so there are approximately this many polygons per
            cluster on average. Defaults to 1000.

    Raises:
        ValueError: If face_labels or face_weighting is not 1D

    Returns:
        list(typing.Union[str, int]):
            (n_polygons,) list of labels. Either float values, representing integer IDs or nan,
            or string values representing the class label
    """
    # Load in the polygons
    polygons_gdf = ensure_projected_CRS(coerce_to_geoframe(polygons))
    # Extract the centroid of each one and convert to a numpy array
    centroids_xy = np.stack(
        polygons_gdf.centroid.apply(lambda point: (point.x, point.y))
    )
    # Determine how many clusters there should be
    n_clusters = int(np.ceil(len(polygons_gdf) / n_polygons_per_cluster))
    # Assign each polygon to a cluster
    polygon_cluster_IDs = KMeans(n_clusters=n_clusters).fit_predict(centroids_xy)

    # This will be set later once we figure out the datatype of the per-cluster labels
    all_labels = None

    # Loop over the individual clusters
    for cluster_ID in tqdm(range(n_clusters), desc="Clusters of polygons"):
        # Determine which polygons are part of that cluster
        cluster_mask = polygon_cluster_IDs == cluster_ID
        # Extract the polygons from one cluster
        cluster_polygons = polygons_gdf.iloc[cluster_mask]
        # Compute the labeling per polygon
        cluster_labels = super().label_polygons(
            face_labels,
            cluster_polygons,
            face_weighting,
            sjoin_overlay,
            return_class_labels,
            unknown_class_label,
            buffer_dist_meters,
        )
        # Convert to numpy array
        cluster_labels = np.array(cluster_labels)
        # Create the aggregation array with the appropriate datatype
        if all_labels is None:
            # We assume that this array will have at least one element since each cluster
            # should be non-empty. All values should be overwritten, so the default value doesn't matter
            all_labels = np.zeros(len(polygons_gdf), dtype=cluster_labels.dtype)

        # Set the appropriate elements of the full array with the newly-computed cluster labels
        all_labels[cluster_mask] = cluster_labels

    # The output is expected to be a list
    all_labels = all_labels.tolist()
    return all_labels

render_flat(cameras, batch_size=1, render_img_scale=1, n_clusters=8, buffer_dist_meters=CHUNKED_MESH_BUFFER_DIST_METERS, vis_clusters=False, **pix2face_kwargs)

Render the texture from the viewpoint of each camera in cameras. Note that this is a generator, so if you want to actually execute the computation, call list() on the output. This version first clusters the cameras, extracts a region of the mesh surrounding each cluster of cameras, and then performs rendering on each sub-region.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `cameras` | `Union[PhotogrammetryCamera, PhotogrammetryCameraSet]` | Either a single camera or a camera set. The texture will be rendered from the perspective of each one | required |
| `batch_size` | `int` | The batch size for pix2face. Defaults to 1. | `1` |
| `render_img_scale` | `float` | The rendered image will be this fraction of the original image corresponding to the virtual camera. Defaults to 1. | `1` |
| `n_clusters` | `int` | Number of clusters to break the cameras into. Defaults to 8. | `8` |
| `buffer_dist_meters` | `float` | How far around the cameras to include the mesh. Defaults to 50. | `CHUNKED_MESH_BUFFER_DIST_METERS` |
| `vis_clusters` | `bool` | Should the clusters of camera locations be shown. Defaults to False. | `False` |

Raises:

| Type | Description |
| --- | --- |
| `TypeError` | If cameras is not the correct type |

Yields:

| Type | Description |
| --- | --- |
| `np.ndarray` | The pix2face array for the next camera. The shape is (int(img_h*render_img_scale), int(img_w*render_img_scale)). |
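
Since this is a generator, nothing is rendered until it is consumed. A sketch with the same
placeholder `chunked_mesh` and `cameras` as above:

# Renders are produced lazily, one pix2face array per camera in the set
for pix2face in chunked_mesh.render_flat(cameras, render_img_scale=0.25):
    # pix2face has shape (int(img_h * 0.25), int(img_w * 0.25))
    ...

# Or force the full computation at once, as the docstring suggests
all_renders = list(chunked_mesh.render_flat(cameras, render_img_scale=0.25))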

Source code in geograypher/meshes/derived_meshes.py
def render_flat(
    self,
    cameras: typing.Union[PhotogrammetryCamera, PhotogrammetryCameraSet],
    batch_size: int = 1,
    render_img_scale: float = 1,
    n_clusters: int = 8,
    buffer_dist_meters: float = CHUNKED_MESH_BUFFER_DIST_METERS,
    vis_clusters: bool = False,
    **pix2face_kwargs,
):
    """
    Render the texture from the viewpoint of each camera in cameras. Note that this is a
    generator so if you want to actually execute the computation, call list() on the output.
    This version first clusters the cameras, extracts a region of the mesh surrounding each
    cluster of cameras, and then performs rendering on each sub-region.

    Args:
        cameras (typing.Union[PhotogrammetryCamera, PhotogrammetryCameraSet]):
            Either a single camera or a camera set. The texture will be rendered from the
            perspective of each one
        batch_size (int, optional):
            The batch size for pix2face. Defaults to 1.
        render_img_scale (float, optional):
            The rendered image will be this fraction of the original image corresponding to the
            virtual camera. Defaults to 1.
        n_clusters (int, optional):
            Number of clusters to break the cameras into. Defaults to 8.
        buffer_dist_meters (float, optional):
            How far around the cameras to include the mesh. Defaults to 50.
        vis_clusters (bool, optional):
            Should the clusters of camera locations be shown. Defaults to False.

    Raises:
        TypeError: If cameras is not the correct type

    Yields:
        np.ndarray:
           The pix2face array for the next camera. The shape is
           (int(img_h*render_img_scale), int(img_w*render_img_scale)).
    """
    # Create a generator of chunked meshes based on clusters of cameras
    chunk_gen = self.get_mesh_chunks_for_cameras(
        cameras,
        n_clusters=n_clusters,
        buffer_dist_meters=buffer_dist_meters,
        vis_clusters=vis_clusters,
        include_texture=True,
    )

    for sub_mesh_TPM, sub_camera_set, _ in tqdm(
        chunk_gen, total=n_clusters, desc="Rendering by chunks"
    ):
        # Create the render generator
        render_gen = sub_mesh_TPM.render_flat(
            sub_camera_set,
            batch_size=batch_size,
            render_img_scale=render_img_scale,
            **pix2face_kwargs,
        )
        # Yield items from the returned generator
        for render_item in render_gen:
            yield render_item