Android: Implementing a Gesture ImageView with Custom GestureDetectors
Without further ado, let's get into today's topic.
First, the link to the previous article:
In the previous article we covered the ScaleGestureDetector utility class, and I kept wondering: since Google went to the trouble of building ScaleGestureDetector, why didn't it also handle rotation and translation while it was at it? Well, presumably Google wanted to leave developers some room to build their own.
Modeled on ScaleGestureDetector, we'll define a utility class called MoveGestureDetector dedicated to detecting drag (move) gestures. Since we will also need things like RotateGestureDetector later, we first define a BaseGestureDetector that pulls out the shared logic:
public abstract class BaseGestureDetector {

    protected final Context mContext;
    protected boolean mGestureInProgress;

    protected MotionEvent mPrevEvent;
    protected MotionEvent mCurrEvent;

    protected float mCurrPressure;
    protected float mPrevPressure;
    protected long mTimeDelta;

    /**
     * Threshold for the ratio of the current event's pressure to the previous
     * event's pressure. Imagine pressing a finger down, dragging, and then lifting
     * it off the screen: the pressure keeps dropping until the finger leaves,
     * so a small ratio means the finger is probably already lifting.
     */
    protected static final float PRESSURE_THRESHOLD = 0.67f;

    public BaseGestureDetector(Context context) {
        mContext = context;
    }

    /**
     * As with ScaleGestureDetector, all event handling goes through this method.
     */
    public boolean onTouchEvent(MotionEvent event) {
        // ACTION_MASK is needed so multi-touch actions such as ACTION_POINTER_UP are recognized.
        final int actionCode = event.getAction() & MotionEvent.ACTION_MASK;

        if (!mGestureInProgress) {
            // No gesture in progress yet: handle the start of the gesture.
            handleStartProgressEvent(actionCode, event);
        } else {
            // A gesture is in progress: keep handling it.
            handleInProgressEvent(actionCode, event);
        }
        return true;
    }

    /**
     * Called while the gesture has not started yet.
     */
    protected abstract void handleStartProgressEvent(int actionCode, MotionEvent event);

    /**
     * Called while the gesture is in progress.
     */
    protected abstract void handleInProgressEvent(int actionCode, MotionEvent event);

    /**
     * Update the state from an event: remember the previous event and obtain the current one.
     */
    protected void updateStateByEvent(MotionEvent curr) {
        final MotionEvent prev = mPrevEvent;

        // Reset mCurrEvent.
        if (mCurrEvent != null) {
            mCurrEvent.recycle();
            mCurrEvent = null;
        }
        mCurrEvent = MotionEvent.obtain(curr);

        // Time difference between the previous event and the current one.
        mTimeDelta = curr.getEventTime() - prev.getEventTime();

        // Finger pressure of the current event and of the previous one.
        mCurrPressure = curr.getPressure(curr.getActionIndex());
        mPrevPressure = prev.getPressure(prev.getActionIndex());
    }

    /**
     * Reset all state.
     */
    protected void resetState() {
        if (mPrevEvent != null) {
            mPrevEvent.recycle();
            mPrevEvent = null;
        }
        if (mCurrEvent != null) {
            mCurrEvent.recycle();
            mCurrEvent = null;
        }
        mGestureInProgress = false;
    }

    /**
     * Returns {@code true} if a gesture is currently in progress, {@code false} otherwise.
     */
    public boolean isInProgress() {
        return mGestureInProgress;
    }

    /**
     * Return the time difference in milliseconds between the previous accepted
     * GestureDetector event and the current GestureDetector event.
     */
    public long getTimeDelta() {
        return mTimeDelta;
    }

    /**
     * Return the event time of the current GestureDetector event being processed.
     */
    public long getEventTime() {
        return mCurrEvent.getEventTime();
    }
}
Next we define a class called MoveGestureDetector that extends BaseGestureDetector and implements the two abstract methods:
public class MoveGestureDetector extends BaseGestureDetector {

    @Override
    protected void handleStartProgressEvent(int actionCode, MotionEvent event) {
    }

    @Override
    protected void handleInProgressEvent(int actionCode, MotionEvent event) {
    }
}
So how do we notify the caller once a gesture is detected? Right, with callbacks. Let's see how ScaleGestureDetector defines its callback interface:
public interface OnScaleGestureListener {
    public boolean onScale(ScaleGestureDetector detector);
    public boolean onScaleBegin(ScaleGestureDetector detector);
    public void onScaleEnd(ScaleGestureDetector detector);
}

public static class SimpleOnScaleGestureListener implements OnScaleGestureListener {

    public boolean onScale(ScaleGestureDetector detector) {
        return false;
    }

    public boolean onScaleBegin(ScaleGestureDetector detector) {
        return true;
    }

    public void onScaleEnd(ScaleGestureDetector detector) {
        // Intentionally empty
    }
}
It defines an interface called OnScaleGestureListener and a class called SimpleOnScaleGestureListener, where SimpleOnScaleGestureListener implements OnScaleGestureListener. Following the same pattern, the interface for our MoveGestureDetector can be defined like this:
/**
 * Modeled on ScaleGestureDetector, we define three callbacks as well.
 */
public interface OnMoveGestureListener {

    /**
     * Called while the finger is moving.
     */
    public boolean onMove(MoveGestureDetector detector);

    /**
     * Called when the move gesture begins.
     */
    public boolean onMoveBegin(MoveGestureDetector detector);

    /**
     * Called when the move gesture ends.
     */
    public void onMoveEnd(MoveGestureDetector detector);
}

public static class SimpleOnMoveGestureListener implements OnMoveGestureListener {

    public boolean onMove(MoveGestureDetector detector) {
        return false;
    }

    public boolean onMoveBegin(MoveGestureDetector detector) {
        return true;
    }

    public void onMoveEnd(MoveGestureDetector detector) {
        // Do nothing; an overriding implementation may be used.
    }
}
Great, the skeleton is in place. Using it looks like this:
1. Create a MoveGestureDetector
public MatrixImageView(Context context, AttributeSet attrs) {
    super(context, attrs);
    initView();
    // Create a scale gesture detector.
    scaleDetector = new ScaleGestureDetector(context, onScaleGestureListener);
    // Create a MoveGestureDetector.
    moveGestureDetector = new MoveGestureDetector(context, onMoveGestureListener);
}
2. Forward touch events to the MoveGestureDetector
@Override
public boolean onTouchEvent(MotionEvent event) {
    // Forward the event to scaleDetector.
    scaleDetector.onTouchEvent(event);
    // Forward the event to moveGestureDetector.
    moveGestureDetector.onTouchEvent(event);
    return true;
}
3. Receive the callbacks
private MoveGestureDetector.SimpleOnMoveGestureListener onMoveGestureListener =
        new MoveGestureDetector.SimpleOnMoveGestureListener() {
    @Override
    public boolean onMove(MoveGestureDetector detector) {
        return super.onMove(detector);
    }
};
See? It works exactly like ScaleGestureDetector: clear and simple. Now that the skeleton is up, let's implement its logic, i.e. the handleStartProgressEvent and handleInProgressEvent methods:
Every line is commented, so here is the code:
public class MoveGestureDetector extends BaseGestureDetector {

    /**
     * Modeled on ScaleGestureDetector, we define three callbacks as well.
     */
    public interface OnMoveGestureListener {

        /**
         * Called while the finger is moving.
         */
        public boolean onMove(MoveGestureDetector detector);

        /**
         * Called when the move gesture begins.
         */
        public boolean onMoveBegin(MoveGestureDetector detector);

        /**
         * Called when the move gesture ends.
         */
        public void onMoveEnd(MoveGestureDetector detector);
    }

    public static class SimpleOnMoveGestureListener implements OnMoveGestureListener {

        public boolean onMove(MoveGestureDetector detector) {
            return false;
        }

        public boolean onMoveBegin(MoveGestureDetector detector) {
            return true;
        }

        public void onMoveEnd(MoveGestureDetector detector) {
            // Do nothing; an overriding implementation may be used.
        }
    }

    private static final PointF FOCUS_DELTA_ZERO = new PointF();

    private final OnMoveGestureListener mListener;

    private PointF mCurrFocusInternal;
    private PointF mPrevFocusInternal;
    private PointF mFocusExternal = new PointF();
    private PointF mFocusDeltaExternal = new PointF();

    public MoveGestureDetector(Context context, OnMoveGestureListener listener) {
        super(context);
        mListener = listener;
    }

    @Override
    protected void handleStartProgressEvent(int actionCode, MotionEvent event) {
        switch (actionCode) {
            // A finger went down.
            case MotionEvent.ACTION_DOWN:
                // Reset all state (currEvent and prevEvent), in case we missed an up/cancel event.
                resetState();
                // Take the current event as mPrevEvent.
                mPrevEvent = MotionEvent.obtain(event);
                // Reset the time delta between the two events.
                mTimeDelta = 0;
                // Update the state.
                updateStateByEvent(event);
                break;

            case MotionEvent.ACTION_MOVE:
                // Call back onMoveBegin; mGestureInProgress decides whether we keep processing
                // the gesture (i.e. whether handleInProgressEvent runs), and the caller decides
                // that via the return value.
                mGestureInProgress = mListener.onMoveBegin(this);
                break;
        }
    }

    /**
     * Handle move events.
     */
    @Override
    protected void handleInProgressEvent(int actionCode, MotionEvent event) {
        switch (actionCode) {
            // The finger was lifted or the gesture was cancelled.
            case MotionEvent.ACTION_UP:
            case MotionEvent.ACTION_CANCEL:
                // Call back onMoveEnd; the move gesture is finished.
                mListener.onMoveEnd(this);
                // Reset all state.
                resetState();
                break;

            case MotionEvent.ACTION_MOVE:
                // Update the state.
                updateStateByEvent(event);
                // Only trigger onMove while the ratio of the current event's pressure to the
                // previous one stays above the threshold: if mCurrPressure / mPrevPressure is
                // very small, the finger is probably already leaving the screen.
                if (mCurrPressure / mPrevPressure > PRESSURE_THRESHOLD) {
                    /**
                     * Call back onMove and read updatePrevious.
                     * The flag is decided by the caller and controls whether the previous event
                     * is updated: if false, mPrevEvent stays the event saved at ACTION_DOWN;
                     * if true, every processed move event replaces mPrevEvent with the newest event.
                     */
                    final boolean updatePrevious = mListener.onMove(this);
                    if (updatePrevious) {
                        mPrevEvent.recycle();
                        mPrevEvent = MotionEvent.obtain(event);
                    }
                }
                break;
        }
    }

    /**
     * The core of the move handling (see ScaleGestureDetector).
     * Overrides the parent's updateStateByEvent.
     */
    protected void updateStateByEvent(MotionEvent curr) {
        super.updateStateByEvent(curr);

        final MotionEvent prev = mPrevEvent;

        // Focal point (centroid) of all fingers in the current event.
        mCurrFocusInternal = determineFocalPoint(curr);
        // Focal point of all fingers in the previous event.
        mPrevFocusInternal = determineFocalPoint(prev);

        // Was a finger added or removed in between?
        boolean mSkipNextMoveEvent = prev.getPointerCount() != curr.getPointerCount();
        // If so, the delta is (0, 0); otherwise it is the distance between the two focal points.
        mFocusDeltaExternal = mSkipNextMoveEvent ? FOCUS_DELTA_ZERO
                : new PointF(mCurrFocusInternal.x - mPrevFocusInternal.x,
                             mCurrFocusInternal.y - mPrevFocusInternal.y);

        // Accumulate the deltas.
        mFocusExternal.x += mFocusDeltaExternal.x;
        mFocusExternal.y += mFocusDeltaExternal.y;
    }

    /**
     * Compute the centroid of all fingers (see ScaleGestureDetector).
     */
    private PointF determineFocalPoint(MotionEvent e) {
        // Number of fingers on screen.
        final int pCount = e.getPointerCount();
        float x = 0f;
        float y = 0f;
        for (int i = 0; i < pCount; i++) {
            x += e.getX(i);
            y += e.getY(i);
        }
        return new PointF(x / pCount, y / pCount);
    }

    /**
     * Accumulated focus offset on the x axis.
     */
    public float getFocusX() {
        return mFocusExternal.x;
    }

    /**
     * Accumulated focus offset on the y axis.
     */
    public float getFocusY() {
        return mFocusExternal.y;
    }

    /**
     * Focus offset between the previous event and the current one.
     */
    public PointF getFocusDelta() {
        return mFocusDeltaExternal;
    }
}
Done! Let's put it to use:
package com.leo.gestureimageview;

import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Matrix;
import android.util.AttributeSet;
import android.util.DisplayMetrics;
import android.view.MotionEvent;
import android.view.ScaleGestureDetector;
import android.widget.ImageView;

import com.leo.gestureimageview.gesturedetectors.MoveGestureDetector;

public class MatrixImageView extends ImageView {

    private Matrix currMatrix;
    private float scaleFactor = 1f; // Current scale factor of the image.
    private float transX, transY;

    private ScaleGestureDetector scaleDetector;
    private MoveGestureDetector moveGestureDetector;

    public MatrixImageView(Context context, AttributeSet attrs) {
        super(context, attrs);
        initView();
        // Create a scale gesture detector.
        scaleDetector = new ScaleGestureDetector(context, onScaleGestureListener);
        // Create a MoveGestureDetector.
        moveGestureDetector = new MoveGestureDetector(context, onMoveGestureListener);
    }

    private void initView() {
        currMatrix = new Matrix();
        DisplayMetrics dm = getResources().getDisplayMetrics();
        Bitmap bitmap = BitmapFactory.decodeResource(getResources(), R.mipmap.test);
        bitmap = Bitmap.createScaledBitmap(bitmap, dm.widthPixels, dm.heightPixels, true);
        setImageBitmap(bitmap);
    }

    @Override
    public boolean onTouchEvent(MotionEvent event) {
        // Forward the event to scaleDetector.
        scaleDetector.onTouchEvent(event);
        // Forward the event to moveGestureDetector.
        moveGestureDetector.onTouchEvent(event);
        return true;
    }

    private void setMatrix() {
        currMatrix.reset();
        currMatrix.postScale(scaleFactor, scaleFactor, getMeasuredWidth() / 2, getMeasuredHeight() / 2);
        currMatrix.postTranslate(transX, transY);
        setImageMatrix(currMatrix);
    }

    private ScaleGestureDetector.SimpleOnScaleGestureListener onScaleGestureListener =
            new ScaleGestureDetector.SimpleOnScaleGestureListener() {
        @Override
        public boolean onScale(ScaleGestureDetector detector) {
            scaleFactor *= detector.getScaleFactor(); // Scale change since the previous event.
            // Don't let the object get too small or too large.
            scaleFactor = Math.max(0.1f, Math.min(scaleFactor, 10.0f));
            setMatrix();
            /**
             * getScaleFactor() is the ratio of the current finger span to the previous span.
             * Returning true tells the detector the event was handled, so the previous event
             * is updated on the next move; returning false keeps the event saved at press time.
             */
            return true;
        }
    };

    private MoveGestureDetector.SimpleOnMoveGestureListener onMoveGestureListener =
            new MoveGestureDetector.SimpleOnMoveGestureListener() {
        @Override
        public boolean onMove(MoveGestureDetector detector) {
            transX = detector.getFocusX();
            transY = detector.getFocusY();
            setMatrix();
            return true;
        }
    };
}
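One small caveat about MatrixImageView: ImageView.setImageMatrix() only takes effect when the view's scale type is MATRIX. The snippet above doesn't set it explicitly, so make sure it is set from the layout, or add a line like the following to initView() (this is my own addition, not part of the original code):

// Ensure the matrix built in setMatrix() is actually applied when the image is drawn.
setScaleType(ScaleType.MATRIX);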
That's it — just a few lines of code and it works. I won't attach a screenshot; run it and try it yourself. With MoveGestureDetector done, RotateGestureDetector follows the same pattern, so here I'll simply paste the code written by the author of the gesture-detector library linked below:
public class RotateGestureDetector extends TwoFingerGestureDetector {

    /**
     * Listener which must be implemented and which is used by RotateGestureDetector
     * to perform callbacks to any implementing class which is registered to a
     * RotateGestureDetector via the constructor.
     *
     * @see SimpleOnRotateGestureListener
     */
    public interface OnRotateGestureListener {
        public boolean onRotate(RotateGestureDetector detector);
        public boolean onRotateBegin(RotateGestureDetector detector);
        public void onRotateEnd(RotateGestureDetector detector);
    }

    /**
     * Helper class which may be extended and where the methods may be
     * implemented. This way it is not necessary to implement all methods
     * of OnRotateGestureListener.
     */
    public static class SimpleOnRotateGestureListener implements OnRotateGestureListener {

        public boolean onRotate(RotateGestureDetector detector) {
            return false;
        }

        public boolean onRotateBegin(RotateGestureDetector detector) {
            return true;
        }

        public void onRotateEnd(RotateGestureDetector detector) {
            // Do nothing; an overriding implementation may be used.
        }
    }

    private final OnRotateGestureListener mListener;
    private boolean mSloppyGesture;

    public RotateGestureDetector(Context context, OnRotateGestureListener listener) {
        super(context);
        mListener = listener;
    }

    @Override
    protected void handleStartProgressEvent(int actionCode, MotionEvent event) {
        switch (actionCode) {
            case MotionEvent.ACTION_POINTER_DOWN:
                // At least the second finger is on screen now.
                resetState(); // In case we missed an up/cancel event.
                mPrevEvent = MotionEvent.obtain(event);
                mTimeDelta = 0;
                updateStateByEvent(event);

                // See if we have a sloppy gesture.
                mSloppyGesture = isSloppyGesture(event);
                if (!mSloppyGesture) {
                    // No, start the gesture now.
                    mGestureInProgress = mListener.onRotateBegin(this);
                }
                break;

            case MotionEvent.ACTION_MOVE:
                if (!mSloppyGesture) {
                    break;
                }
                // See if we still have a sloppy gesture.
                mSloppyGesture = isSloppyGesture(event);
                if (!mSloppyGesture) {
                    // No, start the normal gesture now.
                    mGestureInProgress = mListener.onRotateBegin(this);
                }
                break;

            case MotionEvent.ACTION_POINTER_UP:
                if (!mSloppyGesture) {
                    break;
                }
                break;
        }
    }

    @Override
    protected void handleInProgressEvent(int actionCode, MotionEvent event) {
        switch (actionCode) {
            case MotionEvent.ACTION_POINTER_UP:
                // Gesture ended.
                updateStateByEvent(event);
                if (!mSloppyGesture) {
                    mListener.onRotateEnd(this);
                }
                resetState();
                break;

            case MotionEvent.ACTION_CANCEL:
                if (!mSloppyGesture) {
                    mListener.onRotateEnd(this);
                }
                resetState();
                break;

            case MotionEvent.ACTION_MOVE:
                updateStateByEvent(event);
                // Only accept the event if our relative pressure is within
                // a certain limit. This can help filter shaky data as a
                // finger is lifted.
                if (mCurrPressure / mPrevPressure > PRESSURE_THRESHOLD) {
                    final boolean updatePrevious = mListener.onRotate(this);
                    if (updatePrevious) {
                        mPrevEvent.recycle();
                        mPrevEvent = MotionEvent.obtain(event);
                    }
                }
                break;
        }
    }

    @Override
    protected void resetState() {
        super.resetState();
        mSloppyGesture = false;
    }

    /**
     * Return the rotation difference from the previous rotate event to the current event.
     *
     * @return the current rotation difference in degrees.
     */
    public float getRotationDegreesDelta() {
        double diffRadians = Math.atan2(mPrevFingerDiffY, mPrevFingerDiffX)
                - Math.atan2(mCurrFingerDiffY, mCurrFingerDiffX);
        return (float) (diffRadians * 180 / Math.PI);
    }
}
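Note that RotateGestureDetector extends a TwoFingerGestureDetector from the same library, which isn't pasted in this article. For readers following along, here is a stripped-down sketch of what that base class needs to provide for the rotate detector above (the field and method names follow the library's naming, but the bodies below are my own reconstruction, not the library's exact source):

import android.content.Context;
import android.util.DisplayMetrics;
import android.view.MotionEvent;
import android.view.ViewConfiguration;

public abstract class TwoFingerGestureDetector extends BaseGestureDetector {

    // Vector between the two fingers in the previous and the current event,
    // used by RotateGestureDetector.getRotationDegreesDelta().
    protected float mPrevFingerDiffX;
    protected float mPrevFingerDiffY;
    protected float mCurrFingerDiffX;
    protected float mCurrFingerDiffY;

    private final float mEdgeSlop;

    public TwoFingerGestureDetector(Context context) {
        super(context);
        mEdgeSlop = ViewConfiguration.get(context).getScaledEdgeSlop();
    }

    @Override
    protected void updateStateByEvent(MotionEvent curr) {
        super.updateStateByEvent(curr);
        final MotionEvent prev = mPrevEvent;

        // Vector from pointer 0 to pointer 1 in the previous event...
        if (prev.getPointerCount() > 1) {
            mPrevFingerDiffX = prev.getX(1) - prev.getX(0);
            mPrevFingerDiffY = prev.getY(1) - prev.getY(0);
        }
        // ...and in the current event.
        if (curr.getPointerCount() > 1) {
            mCurrFingerDiffX = curr.getX(1) - curr.getX(0);
            mCurrFingerDiffY = curr.getY(1) - curr.getY(0);
        }
    }

    /**
     * A "sloppy" gesture is one where a pointer sits right at the edge of the display,
     * which usually means the user is just gripping the device rather than gesturing.
     * Simplified here: the library works with raw coordinates.
     */
    protected boolean isSloppyGesture(MotionEvent event) {
        if (event.getPointerCount() < 2) {
            return true;
        }
        DisplayMetrics dm = mContext.getResources().getDisplayMetrics();
        float right = dm.widthPixels - mEdgeSlop;
        float bottom = dm.heightPixels - mEdgeSlop;
        for (int i = 0; i < 2; i++) {
            float x = event.getX(i);
            float y = event.getY(i);
            if (x < mEdgeSlop || y < mEdgeSlop || x > right || y > bottom) {
                return true;
            }
        }
        return false;
    }
}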
Finally, here is a gesture ImageView that combines the scale, move, and rotate detectors:
package com.leo.gestureimageview;

import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Matrix;
import android.graphics.PointF;
import android.util.AttributeSet;
import android.util.DisplayMetrics;
import android.view.MotionEvent;
import android.view.ScaleGestureDetector;
import android.widget.ImageView;

import com.leo.gestureimageview.gesturedetectors.MoveGestureDetector;
import com.leo.gestureimageview.gesturedetectors.RotateGestureDetector;

public class MatrixImageView2 extends ImageView {

    private Matrix mMatrix = new Matrix();
    private float mScaleFactor = 1f;
    private float mRotationDegrees = 0.f;
    private float mFocusX = 0.f;
    private float mFocusY = 0.f;

    private ScaleGestureDetector mScaleDetector;
    private RotateGestureDetector mRotateDetector;
    private MoveGestureDetector mMoveDetector;

    public MatrixImageView2(Context context, AttributeSet attrs) {
        super(context, attrs);
        initView();
    }

    private void initView() {
        DisplayMetrics dm = getResources().getDisplayMetrics();

        // Give the ImageView a bitmap (a test image is set directly here for demo purposes).
        Bitmap bitmap = BitmapFactory.decodeResource(getResources(), R.mipmap.test);
        bitmap = Bitmap.createScaledBitmap(bitmap, dm.widthPixels, dm.heightPixels, true);
        setImageBitmap(bitmap);

        mScaleDetector = new ScaleGestureDetector(getContext(), new ScaleListener());
        mRotateDetector = new RotateGestureDetector(getContext(), new RotateListener());
        mMoveDetector = new MoveGestureDetector(getContext(), new MoveListener());

        mFocusX = dm.widthPixels / 2f;
        mFocusY = dm.heightPixels / 2f;
    }

    @Override
    public boolean onTouchEvent(MotionEvent event) {
        // Forward the event to the scale detector...
        mScaleDetector.onTouchEvent(event);
        // ...to the rotate detector...
        mRotateDetector.onTouchEvent(event);
        // ...and to the move detector.
        mMoveDetector.onTouchEvent(event);
        return true;
    }

    private class ScaleListener extends ScaleGestureDetector.SimpleOnScaleGestureListener {
        @Override
        public boolean onScale(ScaleGestureDetector detector) {
            mScaleFactor *= detector.getScaleFactor(); // Scale change since the previous event.
            // Don't let the object get too small or too large.
            mScaleFactor = Math.max(0.1f, Math.min(mScaleFactor, 10.0f));
            changeMatrix();
            return true;
        }
    }

    private class RotateListener extends RotateGestureDetector.SimpleOnRotateGestureListener {
        @Override
        public boolean onRotate(RotateGestureDetector detector) {
            mRotationDegrees -= detector.getRotationDegreesDelta();
            changeMatrix();
            return true;
        }
    }

    private class MoveListener extends MoveGestureDetector.SimpleOnMoveGestureListener {
        @Override
        public boolean onMove(MoveGestureDetector detector) {
            PointF d = detector.getFocusDelta();
            mFocusX += d.x;
            mFocusY += d.y;
            changeMatrix();
            return true;
        }
    }

    private void changeMatrix() {
        float scaledImageCenterX = (getDrawable().getIntrinsicWidth() * mScaleFactor) / 2;
        float scaledImageCenterY = (getDrawable().getIntrinsicHeight() * mScaleFactor) / 2;
        mMatrix.reset();
        mMatrix.postScale(mScaleFactor, mScaleFactor);
        mMatrix.postRotate(mRotationDegrees, scaledImageCenterX, scaledImageCenterY);
        mMatrix.postTranslate(mFocusX - scaledImageCenterX, mFocusY - scaledImageCenterY);
        setImageMatrix(mMatrix);
    }
}
You can also download this gesture-detector library and study the code yourself; here I'm only sharing my own learning notes:
https://github.com/almeros/android-gesture-detectors
After all that, let's finally take a look at how the famous PhotoView is actually implemented.
PhotoView's GitHub link:
https://github.com/chrisbanes/PhotoView
Having read the sections above, the PhotoView source should feel a lot less mysterious. Let's pull back the curtain:
First, the usage. It's very simple: use PhotoView just like a plain ImageView:
<uk.co.senab.photoview.PhotoView
    android:clickable="true"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    android:scaleType="fitXY" />
That's it! You can now scale, rotate, and move the image. Pretty nice, right?
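If you need to drive it from code, the public methods we'll see in the PhotoView source below can be called directly. A minimal sketch, meant to run inside an Activity's onCreate() after setContentView(); the view id R.id.photo_view is an assumption for this example, and R.mipmap.test is the test image used earlier:

// Grab the PhotoView from the layout (the id is assumed for this example).
PhotoView photoView = (PhotoView) findViewById(R.id.photo_view);
photoView.setImageResource(R.mipmap.test);

// All of these simply delegate to PhotoViewAttacher, as the source below shows.
photoView.setScaleLevels(1f, 2f, 4f); // min / medium / max zoom
photoView.setScale(2f, true);         // zoom to 2x with an animation
photoView.setRotationBy(90f);         // rotate the displayed image by 90 degrees
photoView.setZoomable(true);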
But note:
PhotoView does not support the following scale type; setting it makes the app crash with an error:
android:scaleType="matrix"
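The reason is that PhotoView forces the MATRIX scale type internally (you can see super.setScaleType(ScaleType.MATRIX) in its constructor below), so it cannot let callers set it from the outside. The guard below only illustrates that kind of validation; it is not the library's exact source:

// Illustrative only: reject ScaleType.MATRIX the way a PhotoView-style attacher would,
// since the attacher needs exclusive control over the image matrix.
private static void checkScaleType(ImageView.ScaleType scaleType) {
    if (scaleType == ImageView.ScaleType.MATRIX) {
        throw new IllegalArgumentException("Matrix scale type is not supported");
    }
}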
Let's look at its source code:
public class PhotoView extends ImageView implements IPhotoView {

    private PhotoViewAttacher mAttacher;
    private ScaleType mPendingScaleType;

    public PhotoView(Context context) {
        this(context, null);
    }

    public PhotoView(Context context, AttributeSet attr) {
        this(context, attr, 0);
    }

    public PhotoView(Context context, AttributeSet attr, int defStyle) {
        super(context, attr, defStyle);
        super.setScaleType(ScaleType.MATRIX);
        init();
    }

    protected void init() {
        if (null == mAttacher || null == mAttacher.getImageView()) {
            mAttacher = new PhotoViewAttacher(this);
        }
        if (null != mPendingScaleType) {
            setScaleType(mPendingScaleType);
            mPendingScaleType = null;
        }
    }

    @Override
    public void setRotationTo(float rotationDegree) {
        mAttacher.setRotationTo(rotationDegree);
    }

    @Override
    public void setRotationBy(float rotationDegree) {
        mAttacher.setRotationBy(rotationDegree);
    }

    @Override
    public boolean canZoom() {
        return mAttacher.canZoom();
    }

    @Override
    public RectF getDisplayRect() {
        return mAttacher.getDisplayRect();
    }

    @Override
    public void getDisplayMatrix(Matrix matrix) {
        mAttacher.getDisplayMatrix(matrix);
    }

    @Override
    public boolean setDisplayMatrix(Matrix finalRectangle) {
        return mAttacher.setDisplayMatrix(finalRectangle);
    }

    @Override
    public float getMinimumScale() {
        return mAttacher.getMinimumScale();
    }

    @Override
    public float getMediumScale() {
        return mAttacher.getMediumScale();
    }

    @Override
    public float getMaximumScale() {
        return mAttacher.getMaximumScale();
    }

    @Override
    public float getScale() {
        return mAttacher.getScale();
    }

    @Override
    public ScaleType getScaleType() {
        return mAttacher.getScaleType();
    }

    @Override
    public Matrix getImageMatrix() {
        return mAttacher.getImageMatrix();
    }

    @Override
    public void setAllowParentInterceptOnEdge(boolean allow) {
        mAttacher.setAllowParentInterceptOnEdge(allow);
    }

    @Override
    public void setMinimumScale(float minimumScale) {
        mAttacher.setMinimumScale(minimumScale);
    }

    @Override
    public void setMediumScale(float mediumScale) {
        mAttacher.setMediumScale(mediumScale);
    }

    @Override
    public void setMaximumScale(float maximumScale) {
        mAttacher.setMaximumScale(maximumScale);
    }

    @Override
    public void setScaleLevels(float minimumScale, float mediumScale, float maximumScale) {
        mAttacher.setScaleLevels(minimumScale, mediumScale, maximumScale);
    }

    @Override
    // setImageBitmap calls through to this method
    public void setImageDrawable(Drawable drawable) {
        super.setImageDrawable(drawable);
        if (null != mAttacher) {
            mAttacher.update();
        }
    }

    @Override
    public void setImageResource(int resId) {
        super.setImageResource(resId);
        if (null != mAttacher) {
            mAttacher.update();
        }
    }

    @Override
    public void setImageURI(Uri uri) {
        super.setImageURI(uri);
        if (null != mAttacher) {
            mAttacher.update();
        }
    }

    @Override
    protected boolean setFrame(int l, int t, int r, int b) {
        boolean changed = super.setFrame(l, t, r, b);
        if (null != mAttacher) {
            mAttacher.update();
        }
        return changed;
    }

    @Override
    public void setOnMatrixChangeListener(OnMatrixChangedListener listener) {
        mAttacher.setOnMatrixChangeListener(listener);
    }

    @Override
    public void setOnLongClickListener(OnLongClickListener l) {
        mAttacher.setOnLongClickListener(l);
    }

    @Override
    public void setOnPhotoTapListener(OnPhotoTapListener listener) {
        mAttacher.setOnPhotoTapListener(listener);
    }

    @Override
    public void setOnViewTapListener(OnViewTapListener listener) {
        mAttacher.setOnViewTapListener(listener);
    }

    @Override
    public void setScale(float scale) {
        mAttacher.setScale(scale);
    }

    @Override
    public void setScale(float scale, boolean animate) {
        mAttacher.setScale(scale, animate);
    }

    @Override
    public void setScale(float scale, float focalX, float focalY, boolean animate) {
        mAttacher.setScale(scale, focalX, focalY, animate);
    }

    @Override
    public void setScaleType(ScaleType scaleType) {
        if (null != mAttacher) {
            mAttacher.setScaleType(scaleType);
        } else {
            mPendingScaleType = scaleType;
        }
    }

    @Override
    public void setZoomable(boolean zoomable) {
        mAttacher.setZoomable(zoomable);
    }

    @Override
    public Bitmap getVisibleRectangleBitmap() {
        return mAttacher.getVisibleRectangleBitmap();
    }

    @Override
    public void setZoomTransitionDuration(int milliseconds) {
        mAttacher.setZoomTransitionDuration(milliseconds);
    }

    @Override
    public IPhotoView getIPhotoViewImplementation() {
        return mAttacher;
    }

    @Override
    public void setOnDoubleTapListener(GestureDetector.OnDoubleTapListener newOnDoubleTapListener) {
        mAttacher.setOnDoubleTapListener(newOnDoubleTapListener);
    }

    @Override
    public void setOnScaleChangeListener(PhotoViewAttacher.OnScaleChangeListener onScaleChangeListener) {
        mAttacher.setOnScaleChangeListener(onScaleChangeListener);
    }

    @Override
    public void setOnSingleFlingListener(PhotoViewAttacher.OnSingleFlingListener onSingleFlingListener) {
        mAttacher.setOnSingleFlingListener(onSingleFlingListener);
    }

    @Override
    protected void onDetachedFromWindow() {
        mAttacher.cleanup();
        mAttacher = null;
        super.onDetachedFromWindow();
    }

    @Override
    protected void onAttachedToWindow() {
        init();
        super.onAttachedToWindow();
    }
}
As you can see, there isn't that much code, just over 200 lines (ha, our own MatrixImageView isn't even 100 lines! Joking aside, PhotoView handles many more cases and compatibility concerns than our MatrixImageView ever will). Every method simply delegates, so the main processing logic lives in the PhotoViewAttacher class:
PhotoViewAttacher.java:
The class is quite long, so let's just look at its constructor:
public PhotoViewAttacher(ImageView imageView, boolean zoomable) {
    mImageView = new WeakReference<>(imageView);

    imageView.setDrawingCacheEnabled(true);
    imageView.setOnTouchListener(this);

    ViewTreeObserver observer = imageView.getViewTreeObserver();
    if (null != observer)
        observer.addOnGlobalLayoutListener(this);

    // Make sure we using MATRIX Scale Type
    setImageViewScaleTypeMatrix(imageView);

    if (imageView.isInEditMode()) {
        return;
    }

    // Create Gesture Detectors...
    mScaleDragDetector = VersionedGestureDetector.newInstance(imageView.getContext(), this);

    mGestureDetector = new GestureDetector(imageView.getContext(),
            new GestureDetector.SimpleOnGestureListener() {

                // forward long click listener
                @Override
                public void onLongPress(MotionEvent e) {
                    if (null != mLongClickListener) {
                        mLongClickListener.onLongClick(getImageView());
                    }
                }

                @Override
                public boolean onFling(MotionEvent e1, MotionEvent e2,
                                       float velocityX, float velocityY) {
                    if (mSingleFlingListener != null) {
                        if (getScale() > DEFAULT_MIN_SCALE) {
                            return false;
                        }
                        if (MotionEventCompat.getPointerCount(e1) > SINGLE_TOUCH
                                || MotionEventCompat.getPointerCount(e2) > SINGLE_TOUCH) {
                            return false;
                        }
                        return mSingleFlingListener.onFling(e1, e2, velocityX, velocityY);
                    }
                    return false;
                }
            });

    mGestureDetector.setOnDoubleTapListener(new DefaultOnDoubleTapListener(this));
    mBaseRotation = 0.0f;

    // Finally, update the UI so that we're zoomable
    setZoomable(zoomable);
}
So it, too, creates detectors — an mScaleDragDetector and an mGestureDetector — to watch for gestures. But where are the touch events actually handled?
In the constructor we also find a line that registers a touch listener on the ImageView:
imageView.setOnTouchListener(this);
As you probably guessed, the touch events are then handed to that listener:
@Override
public boolean onTouch(View v, MotionEvent ev) {
    boolean handled = false;

    if (mZoomEnabled && hasDrawable((ImageView) v)) {
        ViewParent parent = v.getParent();
        switch (ev.getAction()) {
            case ACTION_DOWN:
                // First, disable the Parent from intercepting the touch event
                if (null != parent) {
                    parent.requestDisallowInterceptTouchEvent(true);
                } else {
                    LogManager.getLogger().i(LOG_TAG, "onTouch getParent() returned null");
                }

                // If we're flinging, and the user presses down, cancel fling
                cancelFling();
                break;

            case ACTION_CANCEL:
            case ACTION_UP:
                // If the user has zoomed less than min scale, zoom back to min scale
                if (getScale() < mMinScale) {
                    RectF rect = getDisplayRect();
                    if (null != rect) {
                        v.post(new AnimatedZoomRunnable(getScale(), mMinScale,
                                rect.centerX(), rect.centerY()));
                        handled = true;
                    }
                }
                break;
        }

        // Try the Scale/Drag detector
        if (null != mScaleDragDetector) {
            boolean wasScaling = mScaleDragDetector.isScaling();
            boolean wasDragging = mScaleDragDetector.isDragging();

            handled = mScaleDragDetector.onTouchEvent(ev);

            boolean didntScale = !wasScaling && !mScaleDragDetector.isScaling();
            boolean didntDrag = !wasDragging && !mScaleDragDetector.isDragging();

            mBlockParentIntercept = didntScale && didntDrag;
        }

        // Check to see if the user double tapped
        if (null != mGestureDetector && mGestureDetector.onTouchEvent(ev)) {
            handled = true;
        }
    }

    return handled;
}
Once an event has been processed, the various callbacks fire, and after each callback the ImageView gets a new Matrix. Scaling, for example:
@Override
public void setScale(float scale, float focalX, float focalY, boolean animate) {
    ImageView imageView = getImageView();

    if (null != imageView) {
        // Check to see if the scale is within bounds
        if (scale < mMinScale || scale > mMaxScale) {
            LogManager
                    .getLogger()
                    .i(LOG_TAG, "Scale must be within the range of minScale and maxScale");
            return;
        }

        if (animate) {
            imageView.post(new AnimatedZoomRunnable(getScale(), scale, focalX, focalY));
        } else {
            mSuppMatrix.setScale(scale, scale, focalX, focalY);
            checkAndDisplayMatrix();
        }
    }
}
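checkAndDisplayMatrix() isn't pasted here, but its role follows from the surrounding code: combine the base (fit) matrix with the user's supplementary matrix (mSuppMatrix), clamp the result so the drawable can't wander off screen, and hand it to ImageView.setImageMatrix(). A rough sketch of that idea, with illustrative helper names rather than the library's exact ones:

// Rough sketch of what checkAndDisplayMatrix() does; helper names are illustrative.
private void checkAndDisplayMatrix() {
    // Clamp translation so the displayed rect stays within the view bounds.
    if (checkMatrixBounds()) {
        // The draw matrix is the base matrix combined with the user's supplementary matrix.
        getImageView().setImageMatrix(getDrawMatrix());
    }
}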
The other operations work in a similar way. There really is a lot of code, because it covers a lot of situations — which goes to show that writing a solid custom widget is not that simple. Keep at it!
That's all for this article. I hope it helps with your learning, and thanks for your support.