I know the nested Do Until loops work fine, but when I store ActiveCell.Address and then try to return to it, I am not taken back to the original position.
Sub Refresh()
    Dim PN
    Dim Supervisor
    Dim Location

    Worksheets("sheet1").Activate
    Range("A2").Activate

    Do Until IsEmpty(ActiveCell.Value)
        PN = ActiveCell.Value
        Location = ActiveCell.Address

        Worksheets("sheet4").Activate
        Range("C2").Activate
        Do Until IsEmpty(ActiveCell.Value)
            If PN = ActiveCell.Value Then
                ActiveCell.Offset(0, 18).Select
                Supervisor = ActiveCell.Value
                ActiveCell.Offset(0, -18).Select
            End If
            ActiveCell.Offset(1, 0).Select
        Loop

        Range(Location).Activate
        ActiveCell.Offset(0, 5).Select
        ActiveCell.Value = Supervisor
        ActiveCell.Offset(0, -5).Select
        Range(Location).Activate
        ActiveCell.Offset(1, 0).Select
    Loop
End Sub
Answer 0 (score: 0)
See if this works:
#include <iostream>
#include <stdio.h>
#include <fstream>
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace cv;
using namespace std;
char key = 'a';
int framecount = 0;

// SURF keypoint detector/descriptor and FLANN-based matcher, shared across frames
SurfFeatureDetector detector( 500 );
SurfDescriptorExtractor extractor;
FlannBasedMatcher matcher;

Mat frame, des_object, image;
Mat des_image, img_matches, H;
std::vector<KeyPoint> kp_object;
std::vector<Point2f> obj_corners(4);
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch> > matches;
std::vector<DMatch> good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
int main()
{
    // Reference image (grayscale) that will be searched for in the camera feed
    Mat object = imread( "C:\\OpenCV2.4.6\\test.png", CV_LOAD_IMAGE_GRAYSCALE );
    if( !object.data )
    {
        std::cout << "Error reading object " << std::endl;
        return -1;
    }

    // Detect keypoints and compute SURF descriptors for the reference image once
    detector.detect( object, kp_object );
    extractor.compute( object, kp_object, des_object );

    VideoCapture cap(0);
    if( !cap.isOpened() )
    {
        std::cout << "Error opening camera" << std::endl;
        return -1;
    }

    // Corners of the reference image, used to outline the detected object in the scene
    obj_corners[0] = cvPoint( 0, 0 );
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );

    while (key != 27) // loop until Esc is pressed
    {
        cap >> frame;

        // Skip the first few frames while the camera warms up
        if (framecount < 5)
        {
            framecount++;
            continue;
        }

        cvtColor(frame, image, CV_RGB2GRAY);
        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );

        // k-nearest-neighbour matching (k = 2) followed by Lowe's ratio test
        matcher.knnMatch(des_object, des_image, matches, 2);
        for (int i = 0; i < min(des_image.rows - 1, (int) matches.size()); i++)
        {
            if ((matches[i][0].distance < 0.6 * (matches[i][1].distance)) && ((int) matches[i].size() <= 2 && (int) matches[i].size() > 0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }

        // Draw only the "good" matches
        drawMatches( object, kp_object, frame, kp_image, good_matches, img_matches,
                     Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

        if (good_matches.size() >= 4) // findHomography needs at least 4 correspondences
        {
            // Rebuild the point lists for this frame only
            obj.clear();
            scene.clear();
            for (size_t i = 0; i < good_matches.size(); i++)
            {
                obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }

            try
            {
                H = findHomography( obj, scene, CV_RANSAC );
            }
            catch (Exception e) {}

            if (!H.empty())
            {
                // Project the reference corners into the scene and draw the outline
                perspectiveTransform( obj_corners, scene_corners, H );
                line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
                line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
                line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
                line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
            }
        }

        imshow( "Good Matches", img_matches );
        good_matches.clear();
        key = waitKey(1);
    }

    return 0;
}
Answer 1 (score: 0)
Instead of using Range(Location).Activate in your code, use Range(Location).Select. The rest of the code will then work according to your logic.
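One point worth checking alongside that suggestion: ActiveCell.Address returns an unqualified address such as "$A$2", so Range(Location) resolves against whichever sheet is active at that moment. Below is a minimal sketch of the return step, assuming the same sheet names as in the question; the procedure name ReturnToStoredCell and the sheet4 detour are only illustrative:

Sub ReturnToStoredCell()
    Dim Location As String

    Worksheets("sheet1").Activate
    Location = ActiveCell.Address               ' e.g. "$A$2" - no sheet name included

    Worksheets("sheet4").Activate               ' work on another sheet in between

    Worksheets("sheet1").Activate               ' return to the sheet the address belongs to
    Worksheets("sheet1").Range(Location).Select ' qualify the sheet before selecting
End Sub

Qualifying the worksheet this way keeps the stored address pointing at sheet1 even after other sheets have been activated inside the loop.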