I'm using Angular Material for a dialog. It will be a parent component (containing the dialog's header and footer) that can then project an arbitrary child component (the dialog content) using <ng-content></ng-content>.
dialog.component.html:
<h1 mat-dialog-title>Title</h1>
<mat-dialog-content>
  <ng-content></ng-content>
</mat-dialog-content>
<mat-dialog-actions>
  <button mat-icon-button (click)="submit()">
    <mat-icon>save</mat-icon>
  </button>
</mat-dialog-actions>
arbitrary-dialog-content.component.html:
<dialog>
  <!-- Dialog content here -->
</dialog>
What I want to do is call the arbitrary dialog content's save() method from the dialog's submit() method, so I need to get a reference to the arbitrary component from the parent component. I tried using @ContentChild, but I couldn't get it to work, because I don't know until runtime which component will be the dialog content.
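Roughly what I tried is sketched below; SomeContentComponent is just a placeholder for one concrete content component, which is exactly the limitation, since @ContentChild needs a type known at compile time:

import { Component, ContentChild } from '@angular/core';
import { SomeContentComponent } from './some-content.component'; // placeholder

@Component({
  selector: 'dialog',
  templateUrl: './dialog.component.html',
})
export class DialogComponent {
  // Only finds the projected component if its class is known up front
  @ContentChild(SomeContentComponent) content: SomeContentComponent;

  submit() {
    // undefined whenever a different component is projected
    this.content.save();
  }
}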
Answer 0 (score: 1)
You can use an event emitter on the dialog component to communicate between the components.
Add this to your dialog.ts:
import { Output, EventEmitter } from '@angular/core';

@Output() onSubmit = new EventEmitter();

submit() {
  // do whatever you want, then notify the projected content
  this.onSubmit.next();
}

Then in your arbitrary component's HTML, bind the dialog's output to your save() method:

<dialog (onSubmit)="save()">
  <!-- Dialog content here -->
</dialog>
where save() is your component's save function.
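For completeness, a minimal sketch of what the projected content component could look like (the component name and the save() body here are illustrative, not from the original answer):

import { Component } from '@angular/core';

@Component({
  selector: 'arbitrary-dialog-content',
  templateUrl: './arbitrary-dialog-content.component.html',
})
export class ArbitraryDialogContentComponent {
  save() {
    // persist this component's state here
  }
}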
A shared service would be a more complete solution, but this works for this use case.
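If you do go the shared-service route, a minimal sketch could look like this (DialogActionsService and its members are hypothetical names; it only assumes RxJS, which ships with any Angular app). The dialog pushes into a Subject, and whatever content component is currently projected subscribes to it:

import { Injectable } from '@angular/core';
import { Subject } from 'rxjs';

@Injectable({ providedIn: 'root' })
export class DialogActionsService {
  private submitSource = new Subject<void>();
  // Content components subscribe to this stream and run their save() logic
  submit$ = this.submitSource.asObservable();

  // The dialog calls this from its submit() click handler
  notifySubmit() {
    this.submitSource.next();
  }
}

The dialog injects the service and calls notifySubmit() inside submit(); each content component subscribes to submit$ in ngOnInit (unsubscribing in ngOnDestroy) and calls its own save(). This avoids any coupling between the dialog and the concrete content component.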