Using Canvas as a pad for multiple range inputs

Date: 2017-05-13 02:20:39

Tags: javascript html html5-canvas web-audio-api

I'm trying to build an HTML canvas pad that lets the user drag and drop a point on the pad, which then returns two values (one for the X axis and one for the Y axis) that I can use to drive effects with the Web Audio API.

I have already sorted out the Web Audio API part of the problem.
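
For reference, a minimal sketch of the kind of delay chain this assumes is shown below. The wiring and the wetGain/dryGain names are illustrative assumptions, not from the original post; delayEffect and delayFeedback match the identifiers used in the answer code further down.

// A sketch of a feedback-delay chain with a wet/dry mix (assumed setup).
const audioCtx = new AudioContext();
const delayEffect = audioCtx.createDelay(2.0); // up to 2 seconds of delay
const delayFeedback = audioCtx.createGain();   // feedback amount
const wetGain = audioCtx.createGain();         // delayed (wet) level
const dryGain = audioCtx.createGain();         // direct (dry) level

function connectDelay(source) {
  source.connect(dryGain);            // source -> dry -> out
  source.connect(delayEffect);        // source -> delay
  delayEffect.connect(delayFeedback); // delay -> feedback gain
  delayFeedback.connect(delayEffect); // feedback loop back into the delay
  delayEffect.connect(wetGain);       // delay -> wet -> out
  wetGain.connect(audioCtx.destination);
  dryGain.connect(audioCtx.destination);
}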

The user:

  • Clicks and drags the point anywhere on the X/Y grid
  • On drop we will have an X & Y value (possibly in hidden range inputs; see the sketch after this list) that triggers eventListeners.
  • The X-value eventListener affects the delay time of the delay effect.
  • The Y-value eventListener affects the wet/dry mix of the delay effect.
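
A minimal sketch of the hidden-range-input idea from the list above; the input ids and the synthetic 'input' events are assumptions:

// Hypothetical hidden inputs in the markup:
// <input type="range" id="padX" hidden> <input type="range" id="padY" hidden>
const padX = document.getElementById('padX');
const padY = document.getElementById('padY');

// On drop, write the pad coordinates into the inputs and fire their listeners.
function reportPadValues(x, y) {
  padX.value = x;
  padY.value = y;
  padX.dispatchEvent(new Event('input'));
  padY.dispatchEvent(new Event('input'));
}

padX.addEventListener('input', e => { /* set delay time from e.target.value */ });
padY.addEventListener('input', e => { /* set wet/dry from e.target.value */ });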


So far I've been able to create and render the canvas and the circle, and to add event listeners on the canvas element and the window. My thinking is that I can detect when an event occurs inside the canvas and when that click event leaves the canvas.


So what I think I need to figure out now is: when a click event does occur inside the canvas, how far is it from the circle, and if it does fall within the circle's bounds, I should keep redrawing for as long as the mousedown event is active.
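
For the distance check itself, a minimal sketch (essentially the same Pythagorean test that appears as hitTest in the answer below):

// Returns true when (mx, my) falls inside the circle.
// Comparing squared distances avoids a Math.sqrt call.
function isInsideCircle(circle, mx, my) {
  const dx = mx - circle.x;
  const dy = my - circle.y;
  return dx * dx + dy * dy < circle.rad * circle.rad;
}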

Any help is much appreciated.

2 Answers:

Answer 0 (Score: 1)

I found a nice little solution that confirmed my suspicions about hit detection! All the credit goes to rectangleWorld, as I was mostly just able to modify the example they make available.

Here's a codepen

// Draw the canvas pad
function canvasApp(canvasID) {
  var theCanvas = document.getElementById(canvasID);
  var context = theCanvas.getContext("2d");

  var numShapes;
  var shapes;
  var dragIndex;
  var dragging;
  var mouseX;
  var mouseY;
  var dragHoldX;
  var dragHoldY;

  init();

  function init() {
    numShapes = 1;
    shapes = [];

    makeShapes();

    drawScreen();

    theCanvas.addEventListener("mousedown", mouseDownListener, false);
  }

  function makeShapes() {
    var i;
    var tempX;
    var tempY;
    var tempRad;
    var tempR;
    var tempG;
    var tempB;
    var tempColor;
    var tempShape;
    for (i = 0; i < numShapes; i++) {
      // My canvas element is 240x240
      tempRad = 10;
      tempX = 0 + tempRad;
      tempY = 240 - tempRad;
      tempR = Math.floor(Math.random() * 255);
      tempG = Math.floor(Math.random() * 255);
      tempB = Math.floor(Math.random() * 255);
      tempColor = "rgb(" + tempR + "," + tempG + "," + tempB + ")";
      tempShape = {
        x: tempX,
        y: tempY,
        rad: tempRad,
        color: tempColor
      };
      shapes.push(tempShape);
    }
  }

  function mouseDownListener(evt) {
    var i;
    //We pay attention to the layering order of the objects so that if a mouse down occurs over more than one object,
    //only the topmost one will be dragged.
    var highestIndex = -1;

    //getting mouse position correctly, being mindful of resizing that may have occurred in the browser:
    var bRect = theCanvas.getBoundingClientRect();
    mouseX = (evt.clientX - bRect.left) * (theCanvas.width / bRect.width);
    mouseY = (evt.clientY - bRect.top) * (theCanvas.height / bRect.height);

    //find which shape was clicked
    for (i = 0; i < numShapes; i++) {
      if (hitTest(shapes[i], mouseX, mouseY)) {
        dragging = true;
        if (i > highestIndex) {
          //We will pay attention to the point on the object where the mouse is "holding" the object:
          dragHoldX = mouseX - shapes[i].x;
          dragHoldY = mouseY - shapes[i].y;
          highestIndex = i;
          dragIndex = i;
        }
      }
    }

    if (dragging) {
      window.addEventListener("mousemove", mouseMoveListener, false);
    }
    theCanvas.removeEventListener("mousedown", mouseDownListener, false);
    window.addEventListener("mouseup", mouseUpListener, false);

    //code below prevents the mouse down from having an effect on the main browser window:
    if (evt.preventDefault) {
      evt.preventDefault();
    } //standard
    else if (evt.returnValue) {
      evt.returnValue = false;
    } //older IE
    return false;
  }

  function mouseUpListener(evt) {
    theCanvas.addEventListener("mousedown", mouseDownListener, false);
    window.removeEventListener("mouseup", mouseUpListener, false);
    if (dragging) {
      dragging = false;
      window.removeEventListener("mousemove", mouseMoveListener, false);
    }
  }

  function mouseMoveListener(evt) {
    var posX;
    var posY;
    var shapeRad = shapes[dragIndex].rad;
    var minX = shapeRad;
    var maxX = theCanvas.width - shapeRad;
    var minY = shapeRad;
    var maxY = theCanvas.height - shapeRad;
    //getting mouse position correctly
    var bRect = theCanvas.getBoundingClientRect();
    mouseX = (evt.clientX - bRect.left) * (theCanvas.width / bRect.width);
    mouseY = (evt.clientY - bRect.top) * (theCanvas.height / bRect.height);

    // Divide by the canvas width (240) and multiply by 100 to get a percentage
    var DelayTime = ((mouseX / 240) * 100);
    // Invert the returned value so the bottom of the pad maps to 0 and the top to 100
    var DelayFeedback = (100 - (mouseY / 240) * 100);

    // delayEffect (a DelayNode) and delayFeedback (a GainNode) are
    // Web Audio nodes created elsewhere in the application.
    // Set delay time as a portion of 2 seconds
    delayEffect.delayTime.value = DelayTime / 100 * 2.0;
    // Set the delay feedback gain from the inverted Y percentage
    delayFeedback.gain.value = (DelayFeedback / 100 * 1.0);

    //clamp x and y positions to prevent object from dragging outside of canvas
    posX = mouseX - dragHoldX;
    posX = (posX < minX) ? minX : ((posX > maxX) ? maxX : posX);
    posY = mouseY - dragHoldY;
    posY = (posY < minY) ? minY : ((posY > maxY) ? maxY : posY);

    shapes[dragIndex].x = posX;
    shapes[dragIndex].y = posY;

    drawScreen();
  }

  function hitTest(shape, mx, my) {

    var dx;
    var dy;
    dx = mx - shape.x;
    dy = my - shape.y;

    //a "hit" will be registered if the distance away from the center is less than the radius of the circular object
    return (dx * dx + dy * dy < shape.rad * shape.rad);
  }

  function drawShapes() {
    var i;
    for (i = 0; i < numShapes; i++) {
      context.fillStyle = shapes[i].color;
      context.beginPath();
      context.arc(shapes[i].x, shapes[i].y, shapes[i].rad, 0, 2 * Math.PI, false);
      context.closePath();
      context.fill();
    }
  }

  function drawScreen() {
    context.fillStyle = "#000000";
    context.fillRect(0, 0, theCanvas.width, theCanvas.height);

    drawShapes();
  }

}

window.addEventListener("load", windowLoadHandler, false);

function windowLoadHandler() {
  canvasApp('delayPad');
}

There are still some shortcomings; for example, although the mouseMoveListener constrains the circle's movement, it will keep increasing your x & y values. This means you either have to use the existing listeners to check when the drag event leaves the circle, or, more simply, you can cap the X and Y values.
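
One simple way to apply that cap is to clamp the computed percentages before they reach the audio nodes; a sketch, reusing the names from the snippet above:

// Clamp a value into the [min, max] range.
function clamp(value, min, max) {
  return Math.min(Math.max(value, min), max);
}

// Inside mouseMoveListener, cap the derived percentages at 0-100
// before applying them to the delay nodes:
var DelayTime = clamp((mouseX / 240) * 100, 0, 100);
var DelayFeedback = clamp(100 - (mouseY / 240) * 100, 0, 100);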

Answer 1 (Score: 0)

You have to create an object that stores the x and y values. In the example below I call it pad.

This object will feed both the canvas visualization and the audio processing. These are both outputs (visual and audio respectively), while the input will be the user's gestures (e.g. mousemove).

The inputs update the pad object, and the outputs read from it.

[Note]: This example only works in recent Chrome and Firefox, because it uses MediaElement.captureStream(), which is not yet widely implemented.

const viz_out = canvas.getContext('2d');
let aud_out, mainVolume;

// our pad object holding the coordinates
const pad = {
  x: 0,
  y: 0,
  down: false,
  rad: 10
};

let canvRect = canvas.getBoundingClientRect();

function mousemove(event) {
  if (!aud_out || !pad.down) {
    return;
  }
  pad.x = event.clientX - canvRect.left;
  pad.y = canvRect.height - (event.clientY - canvRect.top); // inverts y axis
  // each output (visual, audio, log) is updated separately
  updateViz();
  updateAud();
  updateLog();
}

viz_out.setTransform(1, 0, 0, -1, 0, 300); // invert the y axis on the canvas too
// simply draws a circle at our pad's coords
function updateViz() {
  viz_out.clearRect(0, 0, canvas.width, canvas.height);
  viz_out.beginPath();
  viz_out.arc(pad.x, pad.y, pad.rad, 0, Math.PI * 2);
  viz_out.fill();
}
// Adapt this to your own effects; here it just modifies a biquadFilter
function updateAud() {
  const default_freq = 350;
  const max_freq = 6000;
  const y_ratio = pad.y / 300;
  aud_out.frequency.value = (default_freq + (max_freq * y_ratio)) - default_freq;
  aud_out.Q.value = (pad.x / 300) * 10;
  mainVolume.value = 1 + ((pad.y + pad.x) / 75);
}

function updateLog() {
  log.textContent = `x:${~~pad.x} y:${~~pad.y}`;
}
canvas.addEventListener('mousedown', e => pad.down = true);
canvas.addEventListener('mouseup', e => pad.down = false);
canvas.addEventListener('mousemove', mousemove);


btn.onclick = e => {
  btn.textContent = 'stop';
  startLoadingAudio();
  btn.onclick = e => {
    mainVolume.value = 0;
  }
}

window.onscroll = window.onresize = e => canvRect = canvas.getBoundingClientRect();

function startLoadingAudio() {
  const audio = new Audio();
  audio.loop = true;
  audio.muted = true;
  audio.onloadedmetadata = e => {
    audio.play();
    const stream = audio.captureStream ? audio.captureStream() : audio.mozCaptureStream();
    initAudioProcessor(stream);
    updateLog();
    window.onscroll();
    updateViz();
  }
  // FF will "taint" the stream, even if the media is served with correct CORS...
  fetch("https://dl.dropboxusercontent.com/s/8c9m92u1euqnkaz/GershwinWhiteman-RhapsodyInBluePart1.mp3").then(resp => resp.blob()).then(b => audio.src = URL.createObjectURL(b));

  function initAudioProcessor(stream) {
    var a_ctx = new AudioContext();
    var gainNode = a_ctx.createGain();
    var biquadFilter = a_ctx.createBiquadFilter();
    var source = a_ctx.createMediaStreamSource(stream);
    source.connect(biquadFilter);
    biquadFilter.connect(gainNode);
    gainNode.connect(a_ctx.destination);
    aud_out = biquadFilter;
    mainVolume = gainNode.gain;
    biquadFilter.type = "bandpass";
  }
}
canvas {
  border: 1px solid;
}
<button id="btn">
start
</button>
<pre id="log"></pre>
<canvas id="canvas" width="300" height="300"></canvas>