Method 1: using fsockopen
I have serious disdain for curl_getinfo()!
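For reference, the curl_getinfo() approach being dismissed here would look roughly like this (a minimal sketch; the URL and timeout are placeholders):

// Minimal curl_getinfo() status check -- the approach the author dislikes.
$ch = curl_init('http://www.111cn.net/');
curl_setopt($ch, CURLOPT_NOBODY, true);         // HEAD-style request, skip the body
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true); // don't echo anything
curl_setopt($ch, CURLOPT_TIMEOUT, 10);
curl_exec($ch);
$code = curl_getinfo($ch, CURLINFO_HTTP_CODE);  // 0 if the connection failed
curl_close($ch);
echo $code;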
The fsockopen version is as follows:
function get_http_code($url = "localhost", $port = 80, $fsock_timeout = 10) {
    set_time_limit(0);
    ignore_user_abort(true);

    // Record the start time
    $timer['start'] = microtime(true);

    // Validate the URL
    if (!preg_match("/^https?:\/\//i", $url)) {
        $url = "http://" . $url;
    }
    // Support HTTPS: switch to port 443 and the ssl:// transport
    // (requires the OpenSSL extension)
    $scheme = '';
    if (preg_match("/^https:\/\//i", $url)) {
        $port = 443;
        $scheme = 'ssl://';
    }

    // Parse the URL
    $urlinfo = parse_url($url);
    if (empty($urlinfo['path'])) {
        $urlinfo['path'] = '/';
    }
    $host = $urlinfo['host'];
    $uri = $urlinfo['path'] . (empty($urlinfo['query']) ? '' : '?' . $urlinfo['query']);

    // Open the connection via fsockopen
    if (!$fp = @fsockopen($scheme . $host, $port, $errno, $error, $fsock_timeout)) {
        $usetime = microtime(true) - $timer['start'];
        return array('code' => -1, 'usetime' => $usetime);
    }

    // Send the request
    $out  = "GET {$uri} HTTP/1.1\r\n";
    $out .= "Host: {$host}\r\n";
    $out .= "Connection: Close\r\n\r\n";
    if (!fwrite($fp, $out)) {
        $usetime = microtime(true) - $timer['start'];
        return array('code' => -2, 'usetime' => $usetime);
    }

    // Read the status line and extract the response code
    $ret = fgets($fp, 1024);
    preg_match("/http\/\d\.\d\s(\d+)/i", $ret, $m);
    $code = isset($m[1]) ? (int)$m[1] : -3;
    fclose($fp);

    $usetime = microtime(true) - $timer['start'];
    return array('code' => $code, 'usetime' => $usetime);
}
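A quick sanity check of the function (the URL is just a placeholder):

// Returns the HTTP status code plus the elapsed time in seconds;
// -1 = connect failed, -2 = write failed, -3 = unparseable status line.
$ret = get_http_code('http://www.111cn.net/');
printf("code: %d, usetime: %.4fs\n", $ret['code'], $ret['usetime']);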
file_get_contents is a simple wrapper around the fsockopen functionality. It is slightly less efficient, but its fetch success rate is very high, so I usually fall back on it when Snoopy runs into problems. PHP 5.0.0 added context support; with a context it can also send headers, and a custom user agent, referer and cookies are no problem. PHP 5.1.0 added the offset and maxlen parameters, which let you read only part of a file.
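A rough sketch of those features (the user agent, referer and cookie values here are illustrative):

// Fetch with a custom stream context: user agent, referer and cookie headers.
$context = stream_context_create(array(
    'http' => array(
        'method'     => 'GET',
        'user_agent' => 'Mozilla/5.0 (compatible; MyFetcher/1.0)', // hypothetical UA string
        'header'     => "Referer: http://www.111cn.net/\r\n" .
                        "Cookie: name=value\r\n",                  // placeholder cookie
        'timeout'    => 10,
    ),
));
// Since PHP 5.1.0: read at most 500 bytes starting at offset 0.
$html = file_get_contents('http://www.111cn.net/', false, $context, 0, 500);
// $http_response_header is populated with the raw response headers,
// so the status line is available without curl_getinfo().
if ($html !== false) {
    echo $http_response_header[0]; // e.g. "HTTP/1.1 200 OK"
}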
Method 2: using snoopy.class.php
Snoopy is a PHP class that emulates a browser: it can fetch page content and submit forms. A minimal usage sketch follows.
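A minimal fetch with Snoopy might look like this (assuming snoopy.class.php sits next to the script; the agent and referer values are illustrative):

include 'snoopy.class.php';

$snoopy = new Snoopy();
$snoopy->agent   = 'Mozilla/5.0 (compatible; MyFetcher/1.0)'; // fake browser UA
$snoopy->referer = 'http://www.111cn.net/';

if ($snoopy->fetch('http://www.111cn.net/')) {
    echo $snoopy->results;  // the fetched page body
} else {
    echo $snoopy->error;    // error message on failure
}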
Snoopy aside, the following cURL snippet shows how to download only part of a page with a Range request:
$ch = curl_init();
curl_setopt($ch, CURLOPT_URL, 'http://www.spiegel.de/');
curl_setopt($ch, CURLOPT_RANGE, '0-500');   // ask for the first bytes only
curl_setopt($ch, CURLOPT_BINARYTRANSFER, 1);
curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1);
$result = curl_exec($ch);
curl_close($ch);
echo $result;

/*
 * As noted above, if the server does not honor the Range header and sends
 * the whole file, curl will download all of it. E.g. http://www.111cn.net
 * ignores the header. In that case you can additionally set a write-function
 * callback and abort the request once enough data has been received.
 * The closure syntax requires PHP 5.3+; for earlier versions use a named
 * function writefn($ch, $chunk) { ... } instead.
 */
$writefn = function ($ch, $chunk) {
    static $data  = '';
    static $limit = 500; // 500 bytes, it's only a test
    $len = strlen($data) + strlen($chunk);
    if ($len >= $limit) {
        $data .= substr($chunk, 0, $limit - strlen($data));
        echo strlen($data), ' ', $data;
        return -1; // returning anything but the chunk length aborts the transfer
    }
    $data .= $chunk;
    return strlen($chunk);
};

$ch = curl_init();
curl_setopt($ch, CURLOPT_URL, 'http://www.111cn.net/');
curl_setopt($ch, CURLOPT_RANGE, '0-500');
curl_setopt($ch, CURLOPT_BINARYTRANSFER, 1);
curl_setopt($ch, CURLOPT_WRITEFUNCTION, $writefn);
$result = curl_exec($ch);
curl_close($ch);
Some common status codes:
200 - the server returned the page successfully
404 - the requested page does not exist
503 - service unavailable (the server is temporarily overloaded or down for maintenance)
301 - the page has been moved permanently (redirect)
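Tying this list back to method 1, a simple availability check could branch on the returned code (the URL list is hypothetical):

$urls = array('http://www.111cn.net/', 'http://localhost/missing.html');
foreach ($urls as $url) {
    $ret = get_http_code($url);
    if ($ret['code'] == 200) {
        echo "{$url} is up ({$ret['usetime']}s)\n";
    } elseif ($ret['code'] == 301) {
        echo "{$url} has moved permanently\n";
    } else {
        echo "{$url} returned {$ret['code']}\n";
    }
}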