public class ExtractText {
/**
* private constructor.
*/
private ExtractText()
{
//static class
}
public static void main( String[] args ) throws Exception
{
if(l!=null)
{
System.out.println("HERE"+l.length);
deleteSubs(op);
System.out.println("Then"+l.length);
}
else
{
System.out.println("WHERE");
}
File y=new File(imgDes);
if(!y.exists())
{
y.mkdirs();
}
File z=new File(imgDestination);
if(!z.exists())
{
z.mkdirs();
}
File fr=new File(outputFile);
if(!fr.isDirectory())
{
fr.delete();
}
// Defaults to text files
String ext = ".txt";
int startPage = 1;
int endPage = Integer.MAX_VALUE;
Writer output = null;
PDDocument document =null;
try
{
try
{
URL url = new URL( pdfFile );
document = PDDocument.load(url, force);
String fileName = url.getFile();
if( outputFile == null && fileName.length() >4)
{
outputFile = new File( fileName.substring( 0, fileName.length() -4 ) + ext ).getName();
}
}
catch( MalformedURLException e)
{
document = PDDocument.load(pdfFile, force);
if( outputFile == null && pdfFile.length() >4 )
{
outputFile = pdfFile.substring( 0, pdfFile.length() -4 ) + ext;
}
}
//document.print();
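// If the document is encrypted, open protection with the given password and make sure text extraction is allowed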
if( document.isEncrypted() )
{
StandardDecryptionMaterial sdm = new StandardDecryptionMaterial( password );
document.openProtection(sdm);
AccessPermission ap = document.getCurrentAccessPermission();
if( ! ap.canExtractContent() )
{
throw new IOException("You do not have permission to extract text" );
}
}
if ((encoding == null) && (toHTML))
{
encoding = "UTF-8";
}
if( toConsole )
{
output = new OutputStreamWriter(System.out);
}
else
{
if( encoding != null )
{
output = new OutputStreamWriter(new FileOutputStream( outputFile ), encoding );
}
else
{
//use default encoding
output = new OutputStreamWriter(new FileOutputStream( outputFile ) );
}
}
PDFTextStripper4 stripper = null;
if(toHTML)
{
stripper = new PDFText2HTML(encoding);
}
else
{
stripper = new PDFTextStripper4(encoding);
}
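// Dump the embedded font programs of every page into imgDestination (Type1 -> .pfb, TrueType -> .ttf)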
File f= new File(imgDestination);
PDDocument pd;
int i=0;
if(f.exists())
{
pd=PDDocument.load(pdfFile);
PDFontDescriptor fd;
fd = new PDFontDescriptorDictionary();
List<PDPage> li=pd.getDocumentCatalog().getAllPages();
for(PDPage page:li)
{
PDResources pdr=page.getResources();
Map<String, PDFont> m=pdr.getFonts();
PDStream pst;
for(PDFont pdd:m.values())
{
System.out.println("----------"+pdd.getBaseFont());
pdd.getFontDescriptor();
fd = pdd.getFontDescriptor();
pdd.setFontDescriptor((PDFontDescriptorDictionary)fd);
System.out.println("tititititi"+pdd.getFontEncoding());
if(pdd.isType1Font())
{
pst=((PDFontDescriptorDictionary) fd).getFontFile3();
System.out.println("In If "+pst);
if(pst!= null)
{
FileOutputStream fos = new FileOutputStream(new File(imgDestination+pdd.getBaseFont().toString()+".pfb"));
IOUtils.copy(pst.createInputStream(), fos);
i++;
System.out.println(i);
fos.close();
}
}
else if(pdd.isTrueTypeFont())
{
pst= ((PDFontDescriptorDictionary) fd).getFontFile2();
System.out.println("In Else-if"+pst);
if (pst!= null)
{
FileOutputStream fos = new FileOutputStream(new File(imgDestination+pdd.getBaseFont().toString()+".ttf"));
IOUtils.copy(pst.createInputStream(), fos);
i++;
System.out.println(i);
fos.close();
}
}
else if(pdd.isSymbolicFont())
{
System.out.println("Symbol.......");
}
else
{
System.out.println("In Else");
}
}
}
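// Extract the text page by page -- this is the loop the question is about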
int pageCount = document.getDocumentCatalog().getAllPages().size();
for (int p = 0; p < pageCount; ++p)
{
System.out.println("I am in for loop");
stripper.setForceParsing( force );
stripper.setSortByPosition( true );
stripper.setShouldSeparateByBeads(separateBeads);
stripper.setStartPage( p);
stripper.setEndPage( p);
stripper.writeText( document, output );
FileOutputStream fos = new FileOutputStream(new File(f5+(p+1)+".html"));
output.close();
}
PDDocumentInformation info = document.getDocumentInformation();
System.out.println( "Page Count=" + document.getNumberOfPages());
System.out.println( "Title=" + info.getTitle());
System.out.println( "Author=" + info.getAuthor());
System.out.println( "Subject=" + info.getSubject() );
System.out.println( "Keywords=" + info.getKeywords() );
System.out.println( "Creator=" + info.getCreator() );
System.out.println( "Producer=" + info.getProducer() );
System.out.println( "Creation Date=" + info.getCreationDate() );
System.out.println( "Modification Date=" + info.getModificationDate());
System.out.println( "Trapped=" + info.getTrapped());
}
}catch(Exception e)
{
e.printStackTrace();
}
finally
{
if( output != null)
{
output.close();
}
if( document != null )
{
document.close();
}
}
}
private static void deleteSubs(File op)
{
// Recursively delete the contents of op, then op itself
File[] files = op.listFiles();
System.out.print("In delete folder");
if(files!=null)
{
//some JVMs return null for empty dirs
for(File f: files)
{
if(f.isDirectory())
{
deleteSubs(f);
}
else
{
f.delete();
}
}
}
op.delete();
}
}
Right now I am able to convert the whole PDF into an HTML file, i.e. I am only extracting the text, not the images, but I want to put each page of the PDF into its own HTML file, so any solution would be very helpful.. THANK YOU
Answer 0 (score: 0)
The answer is already in your question: just set

    stripper.setStartPage( p );
    stripper.setEndPage( p );

accordingly. So you would loop like this:
int pageCount = document.getDocumentCatalog().getAllPages().size();
for (int p = 0; p < pageCount; ++p)
{
    //... your options
    stripper.setStartPage(p + 1);   // stripper page numbers are 1-based
    stripper.setEndPage(p + 1);
    Writer out = new OutputStreamWriter(new FileOutputStream(new File(f5 + (p + 1) + ".html")), encoding);
    stripper.writeText(document, out);  // writeText expects a Writer, not a raw OutputStream
    out.close();
}
If you get an exception related to the sort comparator, use setSortByPosition(false), or wait for release 1.8.8, which fixes this issue.
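For completeness, here is a minimal self-contained sketch of the per-page HTML extraction (PDFBox 1.8.x API assumed; "input.pdf" and the "page-" output prefix are placeholder names, not taken from the code above):

    import java.io.FileOutputStream;
    import java.io.OutputStreamWriter;
    import java.io.Writer;

    import org.apache.pdfbox.pdmodel.PDDocument;
    import org.apache.pdfbox.util.PDFText2HTML;

    public class PerPageHtml
    {
        public static void main(String[] args) throws Exception
        {
            PDDocument document = PDDocument.load("input.pdf");
            try
            {
                PDFText2HTML stripper = new PDFText2HTML("UTF-8");
                stripper.setSortByPosition(false); // work around the sort comparator issue mentioned above
                int pageCount = document.getNumberOfPages();
                for (int p = 1; p <= pageCount; ++p) // stripper page numbers are 1-based
                {
                    stripper.setStartPage(p);
                    stripper.setEndPage(p);
                    Writer out = new OutputStreamWriter(new FileOutputStream("page-" + p + ".html"), "UTF-8");
                    stripper.writeText(document, out);
                    out.close();
                }
            }
            finally
            {
                document.close();
            }
        }
    }

Each iteration writes exactly one page to its own HTML file; the Writer is opened and closed inside the loop, which is what the original code was missing.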