# SSDR-AL
Active Learning for Point Cloud Semantic Segmentation via Spatial-Structural Diversity Reasoning
![image](https://user-images.githubusercontent.com/50863459/176915893-89db6fd7-1c35-414f-8cfd-c52822e18ee0.png)
# Environment
*1.* Set up the Python environment:
```
conda create -n ssdr python=3.5
source activate ssdr
pip install -r helper_requirements.txt
sh compile_op.sh
```
*2.* Install additional Python packages:
```
pip install future python-igraph tqdm transforms3d pynvrtc fastrlock cupy h5py sklearn plyfile scipy pandas
```
*3.* Install Boost (1.63.0 or newer) and Eigen3, in Conda:<br>
```
conda install -c anaconda boost; conda install -c omnia eigen3; conda install eigen; conda install -c r libiconv
```
*4.* Compile the ```libply_c``` and ```libcp``` libraries:
```
CONDAENV=YOUR_CONDA_ENVIRONMENT_LOCATION
cd partition/ply_c
cmake . -DPYTHON_LIBRARY=$CONDAENV/lib/libpython3.6m.so -DPYTHON_INCLUDE_DIR=$CONDAENV/include/python3.6m -DBOOST_INCLUDEDIR=$CONDAENV/include -DEIGEN3_INCLUDE_DIR=$CONDAENV/include/eigen3
make
cd ..
cd cut-pursuit
mkdir build
cd build
cmake .. -DPYTHON_LIBRARY=$CONDAENV/lib/libpython3.6m.so -DPYTHON_INCLUDE_DIR=$CONDAENV/include/python3.6m -DBOOST_INCLUDEDIR=$CONDAENV/include -DEIGEN3_INCLUDE_DIR=$CONDAENV/include/eigen3
make
```
# Run
S3DIS dataset: download the file named "Stanford3dDataset_v1.2_Aligned_Version.zip", uncompress it, and move the folder to `/data/S3DIS`.
- Preparing the dataset:
```
python utils/data_prepare_s3dis.py
```
- Run:
```
./run_sota_comparison.sh
./run_graph_reasoning_analysis.sh
./run_threshold_analysis.sh
```
def get_sampler_args_str(sampler_args):
    # Join the sampler arguments into a single hyphen-separated name, e.g. ["T", "entropy"] -> "T-entropy"
    return "-".join(sampler_args)
def get_w(w):
    # Format a weight dict as ", key1=value1, key2=value2, ..." for logging
    s = ""
    for key in w:
        s = s + ", " + key + "=" + str(w[key])
    return s
cd utils/nearest_neighbors
python setup.py install --home="."
cd ../../
cd utils/cpp_wrappers
sh compile_wrappers.sh
cd ../../../
import pickle
import time
from os.path import join
import numpy as np
from sklearn.neighbors import KDTree
from helper_ply import read_ply
from kcenterGreedy import *
def chamfer_distance(cloud_list, tree_list, centroid_idx):
"""numpy"""
centroid_cloud = cloud_list[centroid_idx]
centroid_tree = tree_list[centroid_idx]
distances = np.zeros([len(cloud_list)])
for i in range(len(cloud_list)):
if i != centroid_idx:
distances1, _ = centroid_tree.query(cloud_list[i])
distances2, _ = tree_list[i].query(centroid_cloud)
av_dist1 = np.mean(distances1)
av_dist2 = np.mean(distances2)
distances[i] = av_dist1 + av_dist2
return distances
def create_cd(superpoint_list, superpoint_centroid_list):
"""numpy"""
sp_num = len(superpoint_list)
cd_dist = np.zeros([sp_num, sp_num])
align_superpoint_list = []
tree_list = []
for i in range(sp_num):
align_superpoint = superpoint_list[i] - superpoint_centroid_list[i]
align_superpoint_list.append(align_superpoint)
tree_list.append(KDTree(align_superpoint))
for i in range(sp_num):
cd_dist[i] = chamfer_distance(align_superpoint_list, tree_list, i)
return cd_dist
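# A minimal sketch (not part of the original pipeline) showing how create_cd can be
# exercised on synthetic data; the superpoint sizes, the helper name _example_create_cd,
# and the use of the mean as centroid are illustrative assumptions only.
def _example_create_cd():
    rng = np.random.RandomState(0)
    superpoints = [rng.rand(50, 3) for _ in range(4)]    # four random [50, 3] superpoints
    centroids = [sp.mean(axis=0) for sp in superpoints]  # one centroid per superpoint
    cd = create_cd(superpoints, centroids)               # [4, 4] symmetric Chamfer-distance matrix
    return cd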
def fps_adj_all(labeled_select_ref, unlabeled_candidate_ref, input_path, data_path):
begin_time = time.time()
unlabeled_num = len(unlabeled_candidate_ref)
labeled_num = len(labeled_select_ref)
N = unlabeled_num + labeled_num
total_cloud = {} # {cloud_name: [{sp_idx, ref_idx}]}
cloud_name_list = []
for i in range(unlabeled_num):
cloud_name = unlabeled_candidate_ref[i]["cloud_name"]
sp_idx = unlabeled_candidate_ref[i]["sp_idx"]
if cloud_name not in total_cloud:
total_cloud[cloud_name] = []
cloud_name_list.append(cloud_name)
total_cloud[cloud_name].append({"sp_idx": sp_idx, "ref_idx": i})
for i in range(labeled_num):
cloud_name = labeled_select_ref[i]["cloud_name"]
sp_idx = labeled_select_ref[i]["sp_idx"]
if cloud_name not in total_cloud:
total_cloud[cloud_name] = []
cloud_name_list.append(cloud_name)
total_cloud[cloud_name].append({"sp_idx": sp_idx, "ref_idx": unlabeled_num + i})
# print("ed,cd below")
A_ed = np.ones([N, N], dtype=np.float) * 1e10
A_cd = np.ones([N, N], dtype=np.float) * 1e10
cloud_name_list_len = len(cloud_name_list)
for i in range(cloud_name_list_len):
cloud_name = cloud_name_list[i]
with open(join(data_path, "superpoint",
cloud_name + ".superpoint"), "rb") as f:
sp = pickle.load(f)
components = sp["components"]
data = read_ply(
join(input_path, '{:s}.ply'.format(cloud_name)))
xyz = np.vstack((data['x'], data['y'], data['z'])).T # shape=[point_number, 3]
source_ref_idx_list = []
one_cloud_candicate_superpoints = []
one_cloud_center_xyz = np.zeros([len(total_cloud[cloud_name]), 3])
one_cloud_center_xyz_len = len(one_cloud_center_xyz)
for j in range(one_cloud_center_xyz_len):
# print(cloud_name_list_len, i, "f1", one_cloud_center_xyz_len, j)
source_sp_idx = total_cloud[cloud_name][j]["sp_idx"]
source_ref_idx_list.append(total_cloud[cloud_name][j]["ref_idx"])
x_y_z = xyz[components[source_sp_idx]]
one_cloud_center_xyz[j, 0] = (np.min(x_y_z[:, 0]) + np.max(x_y_z[:, 0])) / 2.0
one_cloud_center_xyz[j, 1] = (np.min(x_y_z[:, 1]) + np.max(x_y_z[:, 1])) / 2.0
one_cloud_center_xyz[j, 2] = (np.min(x_y_z[:, 2]) + np.max(x_y_z[:, 2])) / 2.0
one_cloud_candicate_superpoints.append(x_y_z)
one_clound_cd_dist = create_cd(superpoint_list=one_cloud_candicate_superpoints,
superpoint_centroid_list=one_cloud_center_xyz)
for j in range(one_cloud_center_xyz_len):
# print(cloud_name_list_len, i, "f2", one_cloud_center_xyz_len, j)
ssdr = one_cloud_center_xyz - one_cloud_center_xyz[j]
dist = np.sqrt(np.sum(np.multiply(ssdr, ssdr), axis=1))
A_ed[source_ref_idx_list[j], source_ref_idx_list] = dist
A_cd[source_ref_idx_list[j], source_ref_idx_list] = one_clound_cd_dist[j]
# print("tensor", 3)
adj = np.exp(-np.add(A_ed, A_cd))
# print("tensor", 4)
adj += -1.0 * np.eye(adj.shape[0]) # S-I
# print("tensor", 5)
    adj_diag = np.sum(adj, axis=1)  # row-wise sum
d_inv = np.power(adj_diag, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
d_mat_inv = np.diag(d_inv)
# print("tensor", 6)
adj = np.matmul(adj, d_mat_inv)
# print("tensor", 7)
adj = adj + np.eye(adj.shape[0]) # D^(-1)(S-I) + I
# print("tensor", 8)
return adj, time.time() - begin_time
def farthest_features_sample(feature_list, sample_number):
"""
Input:
superpoint_list: pointcloud data, [sp_num, each_sp_p_num, 3]
superpoint_centroid_list: pointcloud centroid xyz [sp_num, 3]
sample_number: number of samples
Return:
centroids: sampled superpoint index, [sample_number]
"""
list_num = len(feature_list)
feature_list = np.array(feature_list)
centroids = np.zeros([sample_number], dtype=np.int32)
centroids[0] = np.random.randint(0, list_num)
distance = np.ones([list_num]) * 1e10
for i in range(sample_number - 1):
current_superpoint_center = feature_list[centroids[i]]
dist = np.sum((feature_list - current_superpoint_center) ** 2, axis=-1)
mask = dist < distance
distance[mask] = dist[mask]
centroids[i + 1] = np.argmax(distance)
return centroids
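# A minimal sketch (not part of the original pipeline) of farthest_features_sample on
# random feature vectors; the shapes and the helper name _example_fps are illustrative
# assumptions only.
def _example_fps():
    features = np.random.RandomState(0).rand(100, 32)      # 100 candidate feature vectors
    picked = farthest_features_sample(list(features), 4)   # indices of 4 mutually distant candidates
    return picked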
def GCN_FPS_sampling(labeled_select_features, labeled_select_ref, unlabeled_candidate_features, unlabeled_candidate_ref, input_path, data_path, sampling_batch, gcn_number, gcn_top):
adj, _ = fps_adj_all(labeled_select_ref=labeled_select_ref, unlabeled_candidate_ref=unlabeled_candidate_ref, input_path=input_path, data_path=data_path)
if gcn_top > 0:
gcn_top = int(gcn_top)
mask = np.zeros(adj.shape)
source_idx = np.repeat(np.expand_dims(np.arange(adj.shape[0]), axis=1), repeats=gcn_top, axis=1)
arg_idx = np.argsort(adj, axis=1)[:, -gcn_top:]
mask[source_idx, arg_idx] = 1.0
adj = np.multiply(adj, mask)
featuresV = np.concatenate([unlabeled_candidate_features, labeled_select_features])
featureV_list = [featuresV]
for i in range(int(gcn_number)):
featuresV = np.matmul(adj, featuresV)
featureV_list.append(featuresV)
combinational_features = np.sum(featureV_list, axis=0)
unlabeled_num = len(unlabeled_candidate_features)
selected_ids = farthest_features_sample(combinational_features[:unlabeled_num], sampling_batch)
file_list = {}
for i in selected_ids:
cloud_name, sp_idx = unlabeled_candidate_ref[i]["cloud_name"], unlabeled_candidate_ref[i]["sp_idx"]
if cloud_name not in file_list:
file_list[cloud_name] = []
file_list[cloud_name].append(sp_idx)
return file_list
import numpy as np
import sys
# Define PLY types
ply_dtypes = dict([
(b'int8', 'i1'),
(b'char', 'i1'),
(b'uint8', 'u1'),
(b'uchar', 'u1'),
(b'int16', 'i2'),
(b'short', 'i2'),
(b'uint16', 'u2'),
(b'ushort', 'u2'),
(b'int32', 'i4'),
(b'int', 'i4'),
(b'uint32', 'u4'),
(b'uint', 'u4'),
(b'float32', 'f4'),
(b'float', 'f4'),
(b'float64', 'f8'),
(b'double', 'f8')
])
# Numpy reader format
valid_formats = {'ascii': '', 'binary_big_endian': '>',
'binary_little_endian': '<'}
# ----------------------------------------------------------------------------------------------------------------------
#
# Functions
# \***************/
#
def parse_header(plyfile, ext):
# Variables
line = []
properties = []
num_points = None
while b'end_header' not in line and line != b'':
line = plyfile.readline()
if b'element' in line:
line = line.split()
num_points = int(line[2])
elif b'property' in line:
line = line.split()
properties.append((line[2].decode(), ext + ply_dtypes[line[1]]))
return num_points, properties
def parse_mesh_header(plyfile, ext):
# Variables
line = []
vertex_properties = []
num_points = None
num_faces = None
current_element = None
while b'end_header' not in line and line != b'':
line = plyfile.readline()
# Find point element
if b'element vertex' in line:
current_element = 'vertex'
line = line.split()
num_points = int(line[2])
elif b'element face' in line:
current_element = 'face'
line = line.split()
num_faces = int(line[2])
elif b'property' in line:
if current_element == 'vertex':
line = line.split()
vertex_properties.append((line[2].decode(), ext + ply_dtypes[line[1]]))
            elif current_element == 'face':
                if not line.startswith(b'property list uchar int'):
                    raise ValueError('Unsupported faces property : ' + line.decode())
return num_points, num_faces, vertex_properties
def read_ply(filename, triangular_mesh=False):
"""
Read ".ply" files
Parameters
----------
filename : string
the name of the file to read.
Returns
-------
result : array
data stored in the file
Examples
--------
Store data in file
>>> points = np.random.rand(5, 3)
    >>> values = np.random.randint(2, size=5)
>>> write_ply('example.ply', [points, values], ['x', 'y', 'z', 'values'])
Read the file
>>> data = read_ply('example.ply')
>>> values = data['values']
array([0, 0, 1, 1, 0])
>>> points = np.vstack((data['x'], data['y'], data['z'])).T
array([[ 0.466 0.595 0.324]
[ 0.538 0.407 0.654]
[ 0.850 0.018 0.988]
[ 0.395 0.394 0.363]
[ 0.873 0.996 0.092]])
"""
with open(filename, 'rb') as plyfile:
        # Check that the file starts with ply
        if b'ply' not in plyfile.readline():
            raise ValueError('The file does not start with the word ply')
# get binary_little/big or ascii
fmt = plyfile.readline().split()[1].decode()
if fmt == "ascii":
raise ValueError('The file is not binary')
# get extension for building the numpy dtypes
ext = valid_formats[fmt]
# PointCloud reader vs mesh reader
if triangular_mesh:
# Parse header
num_points, num_faces, properties = parse_mesh_header(plyfile, ext)
# Get point data
vertex_data = np.fromfile(plyfile, dtype=properties, count=num_points)
# Get face data
face_properties = [('k', ext + 'u1'),
('v1', ext + 'i4'),
('v2', ext + 'i4'),
('v3', ext + 'i4')]
faces_data = np.fromfile(plyfile, dtype=face_properties, count=num_faces)
# Return vertex data and concatenated faces
faces = np.vstack((faces_data['v1'], faces_data['v2'], faces_data['v3'])).T
data = [vertex_data, faces]
else:
# Parse header
num_points, properties = parse_header(plyfile, ext)
# Get data
data = np.fromfile(plyfile, dtype=properties, count=num_points)
return data
def header_properties(field_list, field_names):
# List of lines to write
lines = []
# First line describing element vertex
lines.append('element vertex %d' % field_list[0].shape[0])
# Properties lines
i = 0
for fields in field_list:
for field in fields.T:
lines.append('property %s %s' % (field.dtype.name, field_names[i]))
i += 1
return lines
def write_ply(filename, field_list, field_names, triangular_faces=None):
"""
Write ".ply" files
Parameters
----------
filename : string
the name of the file to which the data is saved. A '.ply' extension will be appended to the
        file name if it does not already have one.
field_list : list, tuple, numpy array
the fields to be saved in the ply file. Either a numpy array, a list of numpy arrays or a
tuple of numpy arrays. Each 1D numpy array and each column of 2D numpy arrays are considered
as one field.
field_names : list
        the name of each field as a list of strings. Has to be the same length as the number of
fields.
Examples
--------
>>> points = np.random.rand(10, 3)
>>> write_ply('example1.ply', points, ['x', 'y', 'z'])
>>> values = np.random.randint(2, size=10)
>>> write_ply('example2.ply', [points, values], ['x', 'y', 'z', 'values'])
>>> colors = np.random.randint(255, size=(10,3), dtype=np.uint8)
    >>> field_names = ['x', 'y', 'z', 'red', 'green', 'blue', 'values']
>>> write_ply('example3.ply', [points, colors, values], field_names)
"""
# Format list input to the right form
field_list = list(field_list) if (type(field_list) == list or type(field_list) == tuple) else list((field_list,))
for i, field in enumerate(field_list):
if field.ndim < 2:
field_list[i] = field.reshape(-1, 1)
if field.ndim > 2:
print('fields have more than 2 dimensions')
return False
# check all fields have the same number of data
n_points = [field.shape[0] for field in field_list]
if not np.all(np.equal(n_points, n_points[0])):
print('wrong field dimensions')
return False
# Check if field_names and field_list have same nb of column
n_fields = np.sum([field.shape[1] for field in field_list])
if (n_fields != len(field_names)):
print('wrong number of field names')
return False
# Add extension if not there
if not filename.endswith('.ply'):
filename += '.ply'
# open in text mode to write the header
with open(filename, 'w') as plyfile:
# First magical word
header = ['ply']
# Encoding format
header.append('format binary_' + sys.byteorder + '_endian 1.0')
# Points properties description
header.extend(header_properties(field_list, field_names))
        # Add faces if needed
if triangular_faces is not None:
header.append('element face {:d}'.format(triangular_faces.shape[0]))
header.append('property list uchar int vertex_indices')
# End of header
header.append('end_header')
# Write all lines
for line in header:
plyfile.write("%s\n" % line)
# open in binary/append to use tofile
with open(filename, 'ab') as plyfile:
# Create a structured array
i = 0
type_list = []
for fields in field_list:
for field in fields.T:
type_list += [(field_names[i], field.dtype.str)]
i += 1
data = np.empty(field_list[0].shape[0], dtype=type_list)
i = 0
for fields in field_list:
for field in fields.T:
data[field_names[i]] = field
i += 1
data.tofile(plyfile)
if triangular_faces is not None:
triangular_faces = triangular_faces.astype(np.int32)
type_list = [('k', 'uint8')] + [(str(ind), 'int32') for ind in range(3)]
data = np.empty(triangular_faces.shape[0], dtype=type_list)
data['k'] = np.full((triangular_faces.shape[0],), 3, dtype=np.uint8)
data['0'] = triangular_faces[:, 0]
data['1'] = triangular_faces[:, 1]
data['2'] = triangular_faces[:, 2]
data.tofile(plyfile)
return True
def describe_element(name, df):
""" Takes the columns of the dataframe and builds a ply-like description
Parameters
----------
name: str
df: pandas DataFrame
Returns
-------
element: list[str]
"""
property_formats = {'f': 'float', 'u': 'uchar', 'i': 'int'}
element = ['element ' + name + ' ' + str(len(df))]
if name == 'face':
element.append("property list uchar int points_indices")
else:
for i in range(len(df.columns)):
# get first letter of dtype to infer format
f = property_formats[str(df.dtypes[i])[0]]
element.append('property ' + f + ' ' + df.columns.values[i])
return element
numpy==1.16.1
h5py==2.10.0
cython==0.29.15
open3d-python==0.3.0
pandas==0.25.3
scikit-learn==0.21.3
scipy==1.4.1
PyYAML==5.4
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn.metrics import pairwise_distances
from scipy.spatial import distance
import abc
class SamplingMethod(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self, X, y, seed, **kwargs):
self.X = X
self.y = y
self.seed = seed
def flatten_X(self):
shape = self.X.shape
flat_X = self.X
if len(shape) > 2:
flat_X = np.reshape(self.X, (shape[0],np.product(shape[1:])))
return flat_X
@abc.abstractmethod
def select_batch_(self):
return
def select_batch(self, **kwargs):
return self.select_batch_(**kwargs)
def select_batch_unc_(self, **kwargs):
return self.select_batch_unc_(**kwargs)
def to_dict(self):
return None
class kCenterGreedy(SamplingMethod):
def __init__(self, X, metric='euclidean'):
self.X = X
# self.y = y
self.flat_X = self.flatten_X()
self.name = 'kcenter'
self.features = self.flat_X
self.metric = metric
self.min_distances = None
self.max_distances = None
self.n_obs = self.X.shape[0]
self.already_selected = []
def update_distances(self, cluster_centers, only_new=True, reset_dist=False):
"""Update min distances given cluster centers.
Args:
cluster_centers: indices of cluster centers
only_new: only calculate distance for newly selected points and update
min_distances.
      reset_dist: whether to reset min_distances.
"""
if reset_dist:
self.min_distances = None
if only_new:
cluster_centers = [d for d in cluster_centers
if d not in self.already_selected]
if cluster_centers:
x = self.features[cluster_centers]
# Update min_distances for all examples given new cluster center.
dist = pairwise_distances(self.features, x, metric=self.metric)#,n_jobs=4)
if self.min_distances is None:
self.min_distances = np.min(dist, axis=1).reshape(-1,1)
else:
self.min_distances = np.minimum(self.min_distances, dist)
def select_batch_(self, already_selected, N, **kwargs):
"""
Diversity promoting active learning method that greedily forms a batch
to minimize the maximum distance to a cluster center among all unlabeled
datapoints.
Args:
model: model with scikit-like API with decision_function implemented
already_selected: index of datapoints already selected
N: batch size
Returns:
indices of points selected to minimize distance to cluster centers
"""
try:
# Assumes that the transform function takes in original data and not
# flattened data.
print('Getting transformed features...')
# self.features = model.transform(self.X)
print('Calculating distances...')
self.update_distances(already_selected, only_new=False, reset_dist=True)
except:
print('Using flat_X as features.')
self.update_distances(already_selected, only_new=True, reset_dist=False)
new_batch = []
for _ in range(N):
if self.already_selected is None:
# Initialize centers with a randomly selected datapoint
ind = np.random.choice(np.arange(self.n_obs))
else:
ind = np.argmax(self.min_distances)
# New examples should not be in already selected since those points
# should have min_distance of zero to a cluster center.
assert ind not in already_selected
self.update_distances([ind], only_new=True, reset_dist=False)
new_batch.append(ind)
print('Maximum distance from cluster centers is %0.2f'
% max(self.min_distances))
self.already_selected = already_selected
return new_batch
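# A minimal sketch (not part of the original file) of running the greedy k-center sampler
# on random features; the feature shape, the seed index 0, and the helper name
# _example_kcenter are illustrative assumptions only.
def _example_kcenter():
    features = np.random.RandomState(0).rand(200, 16)
    sampler = kCenterGreedy(features)
    # pick 5 new indices that best cover the feature space, given index 0 is already labeled
    return sampler.select_batch_(already_selected=[0], N=5)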
import os.path
import argparse
from graphs import compute_graph_nn_2
# from provider import *
from helper_ply import read_ply
import glob
import pickle
import os
import numpy as np
import sys
sys.path.append("partition/cut-pursuit/build/src")
sys.path.append("cut-pursuit/build/src")
sys.path.append("ply_c")
sys.path.append("./partition/ply_c")
sys.path.append("./partition")
import libcp
import libply_c
def s3dis_superpoint(args, val_split):
path = "data/S3DIS"
all_files = glob.glob(os.path.join(path, 'original_ply', '*.ply'))
output_dir = os.path.join(path, str(args.reg_strength), "superpoint")
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
tree_path = os.path.join(path, 'input_{:.3f}'.format(0.04))
total_obj = {}
total_obj["unlabeled"] = {}
sp_num = 0
file_num = 0
point_num = 0
for i, file_path in enumerate(all_files):
print(file_path)
cloud_name = file_path.split('/')[-1][:-4]
if val_split not in cloud_name:
sub_ply_file = os.path.join(tree_path, '{:s}.ply'.format(cloud_name))
data = read_ply(sub_ply_file)
rgb = np.vstack((data['red'], data['green'], data['blue'])).T
xyz = np.vstack((data['x'], data['y'], data['z'])).T
# xyz = xyz.astype('f4')
# rgb = rgb.astype('uint8')
# ---compute 10 nn graph-------
graph_nn, target_fea = compute_graph_nn_2(xyz, args.k_nn_adj, args.k_nn_geof)
# ---compute geometric features-------
geof = libply_c.compute_geof(xyz, target_fea, args.k_nn_geof).astype(
'float32')
del target_fea
# --compute the partition------
# --- build the spg h5 file --
features = np.hstack((geof, rgb)).astype('float32') # add rgb as a feature for partitioning
features[:, 3] = 2. * features[:, 3] # increase importance of verticality (heuristic)
graph_nn["edge_weight"] = np.array(
1. / (args.lambda_edge_weight + graph_nn["distances"] / np.mean(graph_nn["distances"])),
dtype='float32')
print("minimal partition...")
components, in_component = libcp.cutpursuit(features, graph_nn["source"], graph_nn["target"]
, graph_nn["edge_weight"], args.reg_strength)
components = np.array(components, dtype='object')
sp = {}
sp["components"] = components
sp["in_component"] = in_component
with open(os.path.join(output_dir, cloud_name+".superpoint"),"wb") as f:
pickle.dump(sp, f)
pseudo_gt = np.zeros([2, len(xyz)], dtype=np.float32)
with open(os.path.join(output_dir, cloud_name+".gt"), "wb") as f:
pickle.dump(pseudo_gt, f)
sp_num = sp_num + len(components)
file_num = file_num + 1
point_num = point_num + len(xyz)
total_obj["unlabeled"][cloud_name] = np.arange(len(components))
total_obj["file_num"] = file_num
total_obj["sp_num"] = sp_num
total_obj["point_num"] = point_num
with open(os.path.join(output_dir, "total.pkl"), "wb") as f:
pickle.dump(total_obj, f)
print("file_num", file_num, "sp_num", sp_num, "point_num", point_num)
def test_superpoint_distribution(args):
all_files = glob.glob(os.path.join('data/S3DIS', str(args.reg_strength), 'superpoint', '*.superpoint'))
sp_count = 0
point_count = 0
dis = np.zeros([10000])
for i, file_path in enumerate(all_files):
with open(file_path, "rb") as f:
superpoint = pickle.load(f)
components = superpoint["components"]
sp_count = sp_count + len(components)
for sp in components:
sp_size = len(sp)
point_count = point_count + sp_size
tt = int(sp_size / 10)
dis[tt] = dis[tt] + 1
mean_size = point_count / sp_count
print("######### test_superpoint_less_than_5")
for i in range(len(dis)):
if dis[i] > 0:
print(str(i*10)+"-"+str((i+1)*10)+": " + str(dis[i]))
print("point_count=" + str(point_count), "sp_count=" + str(sp_count), "mean_size=" + str(mean_size))
print("#####################################")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='123434234')
parser.add_argument('--dataset', default='s3dis', help='s3dis/sema3d/your_dataset')
parser.add_argument('--k_nn_geof', default=45, type=int, help='number of neighbors for the geometric features')
parser.add_argument('--k_nn_adj', default=10, type=int, help='adjacency structure for the minimal partition')
parser.add_argument('--lambda_edge_weight', default=1., type=float,
help='parameter determine the edge weight for minimal part.')
parser.add_argument('--reg_strength', default=0.008, type=float,
help='regularization strength for the minimal partition')
parser.add_argument('--test_area', type=int, default=5, help='Which area to use for test, option: 1-6 [default: 5]')
args = parser.parse_args()
s3dis_superpoint(args, "Area_5")
test_superpoint_distribution(args)
import os.path
import argparse
from graphs import compute_graph_nn_2
# from provider import *
from helper_ply import read_ply
import glob
import pickle
import os
import numpy as np
import sys
sys.path.append("partition/cut-pursuit/build/src")
sys.path.append("cut-pursuit/build/src")
sys.path.append("ply_c")
sys.path.append("./partition/ply_c")
sys.path.append("./partition")
import libcp
import libply_c
# import pydevd_pycharm
# pydevd_pycharm.settrace('10.214.160.245', port=11111, stdoutToServer=True, stderrToServer=True)
train_cloud_name_list = ['bildstein_station1_xyz_intensity_rgb',
'bildstein_station5_xyz_intensity_rgb',
'domfountain_station1_xyz_intensity_rgb',
'domfountain_station2_xyz_intensity_rgb',
'domfountain_station3_xyz_intensity_rgb',
'neugasse_station1_xyz_intensity_rgb',
'sg27_station1_intensity_rgb',
'sg27_station4_intensity_rgb',
'sg27_station5_intensity_rgb',
'sg27_station9_intensity_rgb',
'sg28_station4_intensity_rgb',
'untermaederbrunnen_station1_xyz_intensity_rgb',
'untermaederbrunnen_station3_xyz_intensity_rgb']
val_cloud_name_list = ['bildstein_station3_xyz_intensity_rgb',
'sg27_station2_intensity_rgb']
test_cloud_name_list = ['MarketplaceFeldkirch_Station4_rgb_intensity-reduced',
'sg27_station10_rgb_intensity-reduced',
'sg28_Station2_rgb_intensity-reduced',
'StGallenCathedral_station6_rgb_intensity-reduced']
def semantic3d_superpoint(args):
path = "data/semantic3d"
output_dir = os.path.join(path, str(args.reg_strength), "superpoint")
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
tree_path = os.path.join(path, 'input_{:.3f}'.format(0.06))
total_obj = {}
total_obj["unlabeled"] = {}
sp_num = 0
file_num = 0
point_num = 0
for cloud_name in train_cloud_name_list:
sub_ply_file = os.path.join(tree_path, '{:s}.ply'.format(cloud_name))
data = read_ply(sub_ply_file)
rgb = np.vstack((data['red'], data['green'], data['blue'])).T
xyz = np.vstack((data['x'], data['y'], data['z'])).T
# xyz = xyz.astype('f4')
# rgb = rgb.astype('uint8')
# ---compute 10 nn graph-------
graph_nn, target_fea = compute_graph_nn_2(xyz, args.k_nn_adj, args.k_nn_geof)
# ---compute geometric features-------
geof = libply_c.compute_geof(xyz, target_fea, args.k_nn_geof).astype(
'float32')
del target_fea
# --compute the partition------
# --- build the spg h5 file --
features = geof
geof[:, 3] = 2. * geof[:, 3]
graph_nn["edge_weight"] = np.array(
1. / (args.lambda_edge_weight + graph_nn["distances"] / np.mean(graph_nn["distances"])),
dtype='float32')
print("minimal partition...")
components, in_component = libcp.cutpursuit(features, graph_nn["source"], graph_nn["target"]
, graph_nn["edge_weight"], args.reg_strength)
components = np.array(components, dtype='object')
sp = {}
sp["components"] = components
sp["in_component"] = in_component
with open(os.path.join(output_dir, cloud_name+".superpoint"),"wb") as f:
pickle.dump(sp, f)
pseudo_gt = np.zeros([2, len(xyz)], dtype=np.float32)
with open(os.path.join(output_dir, cloud_name+".gt"), "wb") as f:
pickle.dump(pseudo_gt, f)
sp_num = sp_num + len(components)
file_num = file_num + 1
point_num = point_num + len(xyz)
total_obj["unlabeled"][cloud_name] = np.arange(len(components))
total_obj["file_num"] = file_num
total_obj["sp_num"] = sp_num
total_obj["point_num"] = point_num
with open(os.path.join(output_dir, "total.pkl"), "wb") as f:
pickle.dump(total_obj, f)
print("file_num", file_num, "sp_num", sp_num, "point_num", point_num)
def test_superpoint_distribution(args):
all_files = glob.glob(os.path.join('data/semantic3d', str(args.reg_strength), 'superpoint', '*.superpoint'))
sp_count = 0
point_count = 0
dis = np.zeros([1000000])
for i, file_path in enumerate(all_files):
with open(file_path, "rb") as f:
superpoint = pickle.load(f)
components = superpoint["components"]
sp_count = sp_count + len(components)
for sp in components:
sp_size = len(sp)
point_count = point_count + sp_size
tt = int(sp_size / 10)
dis[tt] = dis[tt] + 1
mean_size = point_count / sp_count
print("######### test_superpoint_less_than_5")
for i in range(len(dis)):
if dis[i] > 0:
print(str(i*10)+"-"+str((i+1)*10)+": " + str(dis[i]))
print("point_count=" + str(point_count), "sp_count=" + str(sp_count), "mean_size=" + str(mean_size))
print("#####################################")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='321312')
parser.add_argument('--dataset', default='semantic3d', help='s3dis/semantic3d/your_dataset')
parser.add_argument('--k_nn_geof', default=45, type=int, help='number of neighbors for the geometric features')
parser.add_argument('--k_nn_adj', default=10, type=int, help='adjacency structure for the minimal partition')
parser.add_argument('--lambda_edge_weight', default=1., type=float,
help='parameter determine the edge weight for minimal part.')
parser.add_argument('--reg_strength', default=0.012, type=float,
help='regularization strength for the minimal partition')
args = parser.parse_args()
semantic3d_superpoint(args)
test_superpoint_distribution(args)
import os.path
import argparse
from graphs import compute_graph_nn_2
# from provider import *
from helper_ply import read_ply
import glob
import pickle
import os
import numpy as np
import sys
sys.path.append("partition/cut-pursuit/build/src")
sys.path.append("cut-pursuit/build/src")
sys.path.append("ply_c")
sys.path.append("./partition/ply_c")
sys.path.append("./partition")
import libcp
import libply_c
# import pydevd_pycharm
# pydevd_pycharm.settrace('10.214.160.245', port=11111, stdoutToServer=True, stderrToServer=True)
def semantickitti_superpoint(args, val_split):
path = "data/SemanticKITTI"
all_files = glob.glob(os.path.join(path, 'input_{:.3f}'.format(0.06), '*.ply'))
output_dir = os.path.join(path, str(args.reg_strength), "superpoint")
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
tree_path = os.path.join(path, 'input_{:.3f}'.format(0.06))
total_obj = {}
total_obj["unlabeled"] = {}
sp_num = 0
file_num = 0
point_num = 0
for i, file_path in enumerate(all_files):
print(file_path)
        cloud_name = file_path.split('/')[-1][:-4]  # file name without the extension
if val_split not in cloud_name:
sub_ply_file = os.path.join(tree_path, '{:s}.ply'.format(cloud_name))
data = read_ply(sub_ply_file)
xyz = np.vstack((data['x'], data['y'], data['z'])).T # shape=[point_number, 3]
# xyz = xyz.astype('f4')
# rgb = rgb.astype('uint8')
# ---compute 10 nn graph-------
graph_nn, target_fea = compute_graph_nn_2(xyz, args.k_nn_adj, args.k_nn_geof)
# ---compute geometric features-------
geof = libply_c.compute_geof(xyz, target_fea, args.k_nn_geof).astype(
                'float32')  # shape=[point_number, 4]; each point aggregates its k_nn_geof nearest neighbors to produce [linearity, planarity, scattering, verticality] features
del target_fea
# --compute the partition------
# --- build the spg h5 file --
features = geof
geof[:, 3] = 2. * geof[:, 3]
graph_nn["edge_weight"] = np.array(
1. / (args.lambda_edge_weight + graph_nn["distances"] / np.mean(graph_nn["distances"])),
dtype='float32')
print("minimal partition...")
            # components is a 2D list: components[sp_idx] holds the point indices of that superpoint
            # in_component maps each point index to its superpoint index ("in" is short for "index")
components, in_component = libcp.cutpursuit(features, graph_nn["source"], graph_nn["target"]
, graph_nn["edge_weight"], args.reg_strength)
components = np.array(components, dtype='object')
sp = {}
sp["components"] = components
sp["in_component"] = in_component
with open(os.path.join(output_dir, cloud_name + ".superpoint"), "wb") as f:
pickle.dump(sp, f)
pseudo_gt = np.zeros([2, len(xyz)], dtype=np.float32)
with open(os.path.join(output_dir, cloud_name + ".gt"), "wb") as f:
pickle.dump(pseudo_gt, f)
sp_num = sp_num + len(components)
file_num = file_num + 1
point_num = point_num + len(xyz)
total_obj["unlabeled"][cloud_name] = np.arange(len(components))
total_obj["file_num"] = file_num
total_obj["sp_num"] = sp_num
total_obj["point_num"] = point_num
with open(os.path.join(output_dir, "total.pkl"), "wb") as f:
pickle.dump(total_obj, f)
print("file_num", file_num, "sp_num", sp_num, "point_num", point_num)
def test_superpoint_distribution(args):
all_files = glob.glob(os.path.join('data/SemanticKITTI', str(args.reg_strength), 'superpoint', '*.superpoint'))
sp_count = 0
point_count = 0
dis = np.zeros([10000])
for i, file_path in enumerate(all_files):
with open(file_path, "rb") as f:
superpoint = pickle.load(f)
components = superpoint["components"]
sp_count = sp_count + len(components)
for sp in components:
sp_size = len(sp)
point_count = point_count + sp_size
tt = int(sp_size / 10)
dis[tt] = dis[tt] + 1
mean_size = point_count / sp_count
print("######### test_superpoint_less_than_5")
for i in range(len(dis)):
if dis[i] > 0:
print(str(i*10)+"-"+str((i+1)*10)+": " + str(dis[i]))
print("point_count=" + str(point_count), "sp_count=" + str(sp_count), "mean_size=" + str(mean_size))
print("#####################################")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='123434234')
parser.add_argument('--k_nn_geof', default=45, type=int, help='number of neighbors for the geometric features')
parser.add_argument('--k_nn_adj', default=10, type=int, help='adjacency structure for the minimal partition')
parser.add_argument('--lambda_edge_weight', default=1., type=float,
help='parameter determine the edge weight for minimal part.')
parser.add_argument('--reg_strength', default=0.012, type=float,
help='regularization strength for the minimal partition')
args = parser.parse_args()
semantickitti_superpoint(args, "08-")
test_superpoint_distribution(args)
*.vscode
*.dylib
*.so
build/
cmake-build-*
*.DS_Store
.devcontainer.json
Dockerfile
*.idea
# top-level CMake configuration file
cmake_minimum_required(VERSION 3.5)
project(CUT_PURSUIT_SEG)
#------------------------------------------------------------------------------
# internal cmake settings
#------------------------------------------------------------------------------
set(CMAKE_COLOR_MAKEFILE ON)
set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake ${CMAKE_MODULE_PATH})
set(CMAKE_MACOSX_RPATH ON)
include(FeatureSummary)
#------------------------------------------------------------------------------
# General settings
#------------------------------------------------------------------------------
set (CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -O3 -std=c++11")
#------------------------------------------------------------------------------
# actual library
#------------------------------------------------------------------------------
add_subdirectory(src)
MIT License
Copyright (c) 2018 Loic Landrieu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
# cut pursuit: a working-set strategy to compute piecewise constant functions on graphs
C/C++ implementation of the L0-cut pursuit algorithms with Matlab and Python interfaces.
Cut pursuit is a graph-cut-based working-set strategy to minimize functions regularized by graph-structured regularizers.
For _G_ = (_V_, _E_, _w_) a graph with edges weighted by _w_, the problem writes:
    min<sub>_x_ ∈ _Ω_<sup>_V_</sup></sub>    _f_(_x_) + ∑<sub>(_u_,_v_) ∈ _E_</sub> _w_<sub>(_u_,_v_)</sub> _φ_(_x_<sub>_u_</sub> - _x_<sub>_v_</sub>)
where _Ω_ is the space in which lie the values associated with each node.
We distinguish two different cases for _φ_, corresponding to different implementations:
- _φ_: _t_ ↦ |_t_|: the convex case, the regularizer is the __graph total variation__.
Implemented for many different functionals _f_, such as quadratic, ℓ<sub>1</sub>-norm, box constraints, simplex constraints, linear, smoothed Kullback–Leibler.
See repository [CP_PFDR_graph_d1], by Hugo Raguet. It is well-suited for regularization and inverse problems with a low total variation prior.
- _φ_: _t_ ↦ _δ_(_t_ ≠ 0) = 1 - _δ_<sub>0</sub>(t): the nonconvex case, the regularizer is the weight of the cut between the adjacent constant components. It is well-suited for segmentation/partitioning tasks. This repository corresponds to this problem.
Current implementation supports the following fidelity functions:
- quadratic fidelity: _f_: _x_ ↦ ∑<sub>_v_ in _V_</sub> ||_x_<sub>_v_</sub> - _y_<sub>_v_</sub>||² with _y_<sub>_v_</sub> an observed value associated with node _v_ (best for partitioning)
- linear fidelity: _f_: _x_ ↦ - ∑<sub>_v_ in _V_</sub> ⟨_x_<sub>_v_</sub>, _y_<sub>_v_</sub>⟩ with _y_<sub>_v_</sub> a weight associated with node _v_
- Kullback–Leibler fidelity: _f_: _x_ ↦ ∑<sub>_v_ in _V_</sub> KL(_x_<sub>_v_</sub>, _p_<sub>_v_</sub>) with _p_<sub>_v_</sub> a probability associated with node _v_. Only applies when _Ω_ is a simplex.
# Requirement
You need boost 1.58, or 1.65 if you want the python wrapper.
```conda install -c anaconda boost```
# Compilation
### C++
Make sure that you use the following compiler flags:
```set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -pthread -fopenmp -O3 -Wall -std=c++11")```
Add ```#include <./cut-pursuit/include/API.h>``` and call any of the interface functions.
### MATLAB
To compile the MATLAB mex file type the following in MATLAB in the workspace containing the ```cut-pursuit``` folder:
```
mkdir ./cut-pursuit/bin
addpath('./cut-pursuit/bin/')
mex CXXFLAGS="\$CXXFLAGS -pthread -Wall -std=c++11 -fopenmp -O3"...
LDFLAGS="\$LDFLAGS -fopenmp" cut-pursuit/mex/L0_cut_pursuit.cpp ...
-output cut-pursuit/bin/L0_cut_pursuit
mex CXXFLAGS="\$CXXFLAGS -pthread -Wall -std=c++11 -fopenmp -O3"...
LDFLAGS="\$LDFLAGS -fopenmp" cut-pursuit/mex/L0_cut_pursuit_segmentation.cpp ...
-output cut-pursuit/bin/L0_cut_pursuit_segmentation
```
You can test the compilation with the following minimal example:
```
n_nodes = 100;
y = rand(3,n_nodes);
Eu = 0:(n_nodes-2);
Ev = 1:(n_nodes-1);
edge_weight = ones(numel(Eu),1);
node_weight = ones(n_nodes,1);
lambda = .1;
mode = 1;
cutoff = 0;
weight_decay = 0;
speedmode = 2;
verbosity = 2;
[solution, in_component, components] = L0_cut_pursuit_segmentation(single(y),...
uint32(Eu), uint32(Ev), single(lambda),...
single(edge_weight), single(node_weight), mode, cutoff, speedmode,...
    weight_decay, verbosity);
subplot(3,1,1)
imagesc(repmat(y, [1 1 1]))
title('input data')
subplot(3,1,2)
imagesc(solution)
title('piecewise constant approximation')
subplot(3,1,3)
imagesc(in_component')
title('components')
```
### Python
Compile the library from the ```cut-pursuit``` folder
```
mkdir build
cd build
cmake .. -DPYTHON_LIBRARY=$CONDAENV/lib/libpython3.6m.so -DPYTHON_INCLUDE_DIR=$CONDAENV/include/python3.6m -DBOOST_INCLUDEDIR=$CONDAENV/include -DEIGEN3_INCLUDE_DIR=$CONDAENV/include/eigen3
make
```
This creates ```build/src/libcp.so```, which can be imported in Python. See ```test.py``` to test it out.
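Below is a minimal usage sketch (assuming ```build/src``` is on your ```PYTHONPATH```), following the call signature used by the partition scripts in this repository: a k-nn graph passed as ```source```/```target``` index arrays with per-edge weights. The toy chain graph and the dtypes are illustrative assumptions.
```
import numpy as np
import libcp

n = 100
features = np.random.rand(n, 3).astype('float32')  # one observation per node
source = np.arange(n - 1, dtype='uint32')          # edges of a simple chain graph
target = np.arange(1, n, dtype='uint32')
edge_weight = np.ones(n - 1, dtype='float32')
reg_strength = 0.1

# components: one array of point indices per constant component
# in_component: the component index of every point
components, in_component = libcp.cutpursuit(features, source, target,
                                            edge_weight, reg_strength)
```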
# References:
Cut Pursuit: fast algorithms to learn piecewise constant functions on general weighted graphs,
L. Landrieu and G. Obozinski, SIAM Journal on Imaging Science 2017, Vol. 10, No. 4 : pp. 1724-1766
[[hal link]]
Cut-pursuit algorithm for nonsmooth functionals regularized by graph total variation, H. Raguet and L. Landrieu, in preparation.
If using the L0-cut pursuit algorithm with _Ω_ other than ℝ, one must also cite:
A structured regularization framework for spatially smoothing semantic labelings of 3D point clouds. Loic Landrieu, Hugo Raguet , Bruno Vallet , Clément Mallet, Martin Weinmann
# - Try to find the Python module NumPy
#
# This module defines:
# NUMPY_INCLUDE_DIR: include path for arrayobject.h
# Copyright (c) 2009-2012 Arnaud Barré <arnaud.barre@gmail.com>
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
if (PYTHON_NUMPY_INCLUDE_DIR)
set(PYTHON_NUMPY_FIND_QUIETLY TRUE)
endif()
if (NOT PYTHON_EXECUTABLE)
  message(FATAL_ERROR "\"PYTHON_EXECUTABLE\" variable not set before FindNumPy.cmake was run.")
endif()
# Look for the include path
# WARNING: The variable PYTHON_EXECUTABLE is defined by the script FindPythonInterp.cmake
execute_process(COMMAND "${PYTHON_EXECUTABLE}" -c "import numpy; print (numpy.get_include()); print (numpy.version.version)"
OUTPUT_VARIABLE NUMPY_OUTPUT
ERROR_VARIABLE NUMPY_ERROR)
if (NOT NUMPY_ERROR)
STRING(REPLACE "\n" ";" NUMPY_OUTPUT ${NUMPY_OUTPUT})
LIST(GET NUMPY_OUTPUT 0 PYTHON_NUMPY_INCLUDE_DIRS)
LIST(GET NUMPY_OUTPUT 1 PYTHON_NUMPY_VERSION)
endif(NOT NUMPY_ERROR)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(NumPy DEFAULT_MSG PYTHON_NUMPY_VERSION PYTHON_NUMPY_INCLUDE_DIRS)
set(PYTHON_NUMPY_INCLUDE_DIR ${PYTHON_NUMPY_INCLUDE_DIRS}
CACHE PATH "Location of NumPy include files.")
mark_as_advanced(PYTHON_NUMPY_INCLUDE_DIR)
#pragma once
#include <string>
#include <sstream>
#include <ctime>
#include <functional>
#include<stdio.h>
#ifndef COMMON_H
#define COMMON_H
#endif // COMMON_H
namespace patch
{
template < typename T > std::string to_string( const T& n )
{
std::ostringstream stm ;
stm << n ;
return stm.str() ;
}
}
enum fidelityType {L2, linear, KL, SPG};
typedef std::pair<std::string, float> NameScale_t;
class GenericParameter
{
public:
std::string in_name, out_name, base_name, extension;
int natureOfData;
fidelityType fidelity;
//std::vector< NameScale_t > v_coord_name_scale, v_attrib_name_scale;
GenericParameter(std::string inName = "in_name", double reg_strength = 0, double fidelity = 0)
{
this->in_name = inName;
char buffer [inName.size() + 10];
std::string extension = inName.substr(inName.find_last_of(".") + 1);
this->extension = extension;
std::string baseName = inName.substr(0, inName.size() - extension.size() - 1);
this->base_name = baseName;
sprintf(buffer, "%s_out_%1.0f_%.0f.%s",baseName.c_str(),fidelity,reg_strength*1000, extension.c_str());
this->out_name = std::string(buffer);
this->natureOfData = 0;
this->fidelity = L2;
}
virtual ~GenericParameter() {}
};
class TimeStack
{
clock_t lastTime;
public:
TimeStack(){}
void tic() {
this->lastTime = clock();
}
std::string toc() {
std::ostringstream stm ;
stm << ((double)(clock() - this->lastTime)) / CLOCKS_PER_SEC;
return stm.str();
}
double tocDouble() {
std::ostringstream stm ;
double x = ((double)(clock() - this->lastTime)) / CLOCKS_PER_SEC;
return x;
}
};
template<typename T>
class ComponentsFusion
{//this class encodes a potential fusion between two adjacent components
//and is ordered wrt the merge_gain
public:
std::size_t comp1, comp2; //index of the components
std::size_t border_index; //index of the border-edge
    T merge_gain; //gain obtained by merging the components
std::vector<T> merged_value; //value of the new components when they are merged
ComponentsFusion(std::size_t c1, std::size_t c2, std::size_t ind = 0, T gain = 0.)
{
this->comp1 = c1;
this->comp2 = c2;
this->border_index = ind;
this->merge_gain = gain;
}
};
template<typename T>
struct lessComponentsFusion: public std::binary_function<ComponentsFusion<T>, ComponentsFusion<T>, bool>
{
bool operator()(const ComponentsFusion<T> lhs, const ComponentsFusion<T> rhs) const
{
return lhs.merge_gain < rhs.merge_gain;
}
};
template<typename T>
class VectorOfCentroids
{
//VectorOfCentroids is a vector of size k x 2 x d where k is the number of components and
// d the dimension of the observation
public:
std::vector< std::vector< std::vector<T> > > centroids;
VectorOfCentroids(std::size_t nb_comp, std::size_t dim)
{
this->centroids = std::vector< std::vector< std::vector<T> > >(nb_comp,
std::vector< std::vector<T> >(2, std::vector<T>(dim, 0.0)));
}
};
template<typename T>
class Point3D
{
public:
T x,y,z;
Point3D(T x = 0., T y = 0., T z = 0.)
{
this->x = x;
this->y = y;
this->z = z;
}
};
template<typename T>
struct lessPoint3D: public std::binary_function<Point3D<T>, Point3D<T>, bool>
{
bool operator()(const Point3D<T> lhs, const Point3D<T> rhs) const
{
if (lhs.x != rhs.x)
{
return lhs.x < rhs.x;
}
if (lhs.y != rhs.y)
{
return lhs.y < rhs.y;
}
        // compare z last; returning strictly-less keeps this a valid strict weak ordering
        return lhs.z < rhs.z;
}
};
##############################
### Find required packages ###
##############################
find_package(PythonLibs)
find_package(PythonInterp)
include(FindNumPy)
include_directories(../include)
find_package(OpenMP)
if (${OpenMP_CXX_FOUND})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
add_definitions(-DOPENMP)
endif()
find_package(Boost 1.65.0 COMPONENTS graph REQUIRED) #system filesystem thread serialization
if (${Boost_MINOR_VERSION} LESS 67 )
find_package(Boost 1.65.0 COMPONENTS numpy${PYTHON_VERSION_MAJOR} REQUIRED) #system filesystem thread serialization
else()
set(PYTHONVERSION ${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR})
find_package(Boost 1.67.0 COMPONENTS numpy${PYTHONVERSION} REQUIRED)
endif()
include_directories(${Boost_INCLUDE_DIRS})
link_directories(${Boost_LIBRARY_DIRS})
message("PYTHON LIBRARIES ARE " ${PYTHON_LIBRARIES})
INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIRS} ${PYTHON_NUMPY_INCLUDE_DIR})
LINK_DIRECTORIES(${PYTHON_LIBRARY_DIRS})
file(GLOB CP_HEADERS include/*.h)
set(CMAKE_LD_FLAG "${CMAKE_LD_FLAGS} -shared -Wl -fPIC --export-dynamic -o -O3 -Wall")
message(${Boost_LIBRARIES})
add_library(cp SHARED cutpursuit.cpp ${CP_HEADERS})
target_link_libraries(cp
${Boost_LIBRARIES}
${PYTHON_LIBRARIES})
#!/bin/bash
# TODO: use a new seed
reg_strength=0.008
# 104
python -u sff_create_seed.py --gpu 0 --seed_percent 0.005 --reg_strength ${reg_strength} > record_log/rebuttal_log_seed_${reg_strength}.txt 2>&1
# 105
python -u ssdr_main_S3DIS2.py --reg_strength ${reg_strength} --t 10000000 --gpu 0 --round 2 --sampler random --oracle_mode dominant --min_size 5 >> record_log/ssdr_log_t10000000-random-mean-dominant-0.9-5_${reg_strength}.txt 2>&1 &
python -u ssdr_main_S3DIS2.py --reg_strength ${reg_strength} --t 10000000 --gpu 0 --round 2 --sampler T --point_uncertainty_mode entropy --classbal 0 --uncertainty_mode mean --oracle_mode dominant --threshold 0.9 --min_size 5 >> record_log/ssdr_log_t10000000-entropy-mean-dominant-0.9-5_${reg_strength}.txt 2>&1 &
python -u ssdr_main_S3DIS2.py --reg_strength ${reg_strength} --t 10000000 --gpu 3 --round 2 --sampler T --point_uncertainty_mode lc --classbal 0 --uncertainty_mode mean --oracle_mode dominant --threshold 0.9 --min_size 5 >> record_log/ssdr_log_t10000000-lc-mean-dominant-0.9-5_${reg_strength}.txt 2>&1 &
python -u ssdr_main_S3DIS2.py --reg_strength ${reg_strength} --t 10000000 --gpu 2 --round 2 --sampler T --point_uncertainty_mode sb --classbal 0 --uncertainty_mode mean --oracle_mode dominant --threshold 0.9 --min_size 5 >> record_log/ssdr_log_t10000000-sb-mean-dominant-0.9-5_${reg_strength}.txt 2>&1 &
python -u sff_main_S3DIS2.py --reg_strength ${reg_strength} --t 10000000 --gpu 1 --round 2 --sampler T --point_uncertainty_mode sb --classbal 2 --uncertainty_mode mean --oracle_mode dominant --threshold 0.9 --min_size 5 >> record_log/rebuttal_log_t10000000-sb-clsbal-mean-dominant-0.9-5_${reg_strength}.txt 2>&1 &
python -u sff_main_S3DIS2.py --reg_strength ${reg_strength} --t 10000000 --gpu 0 --round 2 --sampler T --point_uncertainty_mode sb --classbal 2 --uncertainty_mode WetSU --gcn_fps 1 --oracle_mode NAIL --threshold 0.9 --min_size 5 >> record_log/rebuttal_log_t10000000-sb-clsbal-WetSU-gcn_fps-NAIL-0.9-5_${reg_strength}.txt 2>&1 &