ClaireOzzz committed
Commit bd1d3d4
1 Parent(s): 8f17adb

Update depthgltf/app_visualisations.py

Files changed (1)
  1. depthgltf/app_visualisations.py +5 -5
depthgltf/app_visualisations.py CHANGED
@@ -106,19 +106,19 @@ title = "Demo: zero-shot depth estimation with DPT + 3D Point Cloud"
 description = "This demo is a variation from the original <a href='https://huggingface.co/spaces/nielsr/dpt-depth-estimation' target='_blank'>DPT Demo</a>. It uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object."
 #examples = [["examples/" + img] for img in os.listdir("examples/")]
 
-result_image_path = os.path.join(current_directory, '..', 'result.png')
-image_path = Path(result_image_path)
+# result_image_path = os.path.join(current_directory, '..', 'result.png')
+# image_path = Path(result_image_path)
 
 
 # Load the image
-rawimage = Image.open(image_path)
-image_r = gr.Image(value=rawimage, type="pil", label="Input Image")
+# rawimage = Image.open(image_path)
+# image_r = gr.Image(value=rawimage, type="pil", label="Input Image")
 #image_r.change(create_visual_demo, [],[])
 
 def create_visual_demo():
   iface = gr.Interface(fn=process_image,
                        inputs=[gr.Image(
-                            type="filepath", label="Input Image", value=image_path)],
+                            type="filepath", label="Input Image")],
                        outputs=[gr.Image(label="predicted depth", type="pil"),
                                 gr.Model3D(label="3d mesh reconstruction", clear_color=[
                                     1.0, 1.0, 1.0, 1.0]),
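For context, a minimal sketch of what create_visual_demo() assembles after this commit, with the hard-coded result.png default removed from the input image. This is an approximation, not the file's actual contents: process_image is a hypothetical stub here (the real one runs DPT depth estimation and builds the point cloud), only the two outputs visible in the hunk are included, and the launch() call is assumed.

# Sketch only: approximates the Gradio interface after this change.
import gradio as gr

def process_image(image_path):
    # Placeholder for the real DPT + point-cloud pipeline; returns empty
    # outputs so the sketch runs on its own.
    return None, None

def create_visual_demo():
    iface = gr.Interface(
        fn=process_image,
        # The input no longer carries value=image_path, so the demo starts
        # empty instead of preloading ../result.png.
        inputs=[gr.Image(type="filepath", label="Input Image")],
        outputs=[
            gr.Image(label="predicted depth", type="pil"),
            gr.Model3D(label="3d mesh reconstruction",
                       clear_color=[1.0, 1.0, 1.0, 1.0]),
        ],
    )
    iface.launch()  # assumed; not shown in the diff hunk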