Initial commit
BIN
Python/MinecraftPy/assets/Grass_block.blend
Normal file
BIN
Python/MinecraftPy/assets/Sans.png
Normal file
|
After Width: | Height: | Size: 2.4 KiB |
BIN
Python/MinecraftPy/assets/arm.blend
Normal file
BIN
Python/MinecraftPy/assets/arm.blend1
Normal file
13
Python/MinecraftPy/assets/arm.mtl
Normal file
@@ -0,0 +1,13 @@
|
||||
# Blender MTL File: 'arm.blend'
|
||||
# Material Count: 1
|
||||
|
||||
newmtl Arm
|
||||
Ns 0.297520
|
||||
Ka 1.000000 1.000000 1.000000
|
||||
Kd 0.800000 0.800000 0.800000
|
||||
Ks 0.315152 0.315152 0.315152
|
||||
Ke 0.000000 0.000000 0.000000
|
||||
Ni 1.450000
|
||||
d 1.000000
|
||||
illum 2
|
||||
map_Kd arm.png
|
||||
40
Python/MinecraftPy/assets/arm.obj
Normal file
@@ -0,0 +1,40 @@
|
||||
# Blender v2.83.0 OBJ File: 'arm.blend'
|
||||
# www.blender.org
|
||||
mtllib arm.mtl
|
||||
o Cube_Cube.001
|
||||
v 0.000000 -0.500000 0.000000
|
||||
v 0.000000 0.500000 0.000000
|
||||
v 0.000000 -0.500000 -3.000000
|
||||
v 0.000000 0.500000 -3.000000
|
||||
v 1.000000 -0.500000 0.000000
|
||||
v 1.000000 0.500000 0.000000
|
||||
v 1.000000 -0.500000 -3.000000
|
||||
v 1.000000 0.500000 -3.000000
|
||||
vt 0.930330 0.500000
|
||||
vt 0.930330 0.622951
|
||||
vt 0.561476 0.622951
|
||||
vt 0.561476 0.500000
|
||||
vt 0.438524 0.622951
|
||||
vt 0.438524 0.500000
|
||||
vt 0.069670 0.622951
|
||||
vt 0.069670 0.500000
|
||||
vt 0.438524 0.131146
|
||||
vt 0.438524 0.008194
|
||||
vt 0.561476 0.008194
|
||||
vt 0.561476 0.131146
|
||||
vt 0.561476 0.991806
|
||||
vt 0.438524 0.991806
|
||||
vn -1.0000 0.0000 0.0000
|
||||
vn 0.0000 0.0000 -1.0000
|
||||
vn 1.0000 0.0000 0.0000
|
||||
vn 0.0000 0.0000 1.0000
|
||||
vn 0.0000 -1.0000 0.0000
|
||||
vn 0.0000 1.0000 0.0000
|
||||
usemtl Arm
|
||||
s off
|
||||
f 1/1/1 2/2/1 4/3/1 3/4/1
|
||||
f 3/4/2 4/3/2 8/5/2 7/6/2
|
||||
f 7/6/3 8/5/3 6/7/3 5/8/3
|
||||
f 5/9/4 6/10/4 2/11/4 1/12/4
|
||||
f 3/4/5 7/6/5 5/9/5 1/12/5
|
||||
f 8/5/6 4/3/6 2/13/6 6/14/6
|
||||
BIN
Python/MinecraftPy/assets/arm_texture.png
Normal file
|
After Width: | Height: | Size: 21 KiB |
BIN
Python/MinecraftPy/assets/arm_texture.psd
Normal file
13
Python/MinecraftPy/assets/block.mtl
Normal file
@@ -0,0 +1,13 @@
|
||||
# Blender MTL File: 'None'
|
||||
# Material Count: 1
|
||||
|
||||
newmtl Stone
|
||||
Ns 225.000000
|
||||
Ka 1.000000 1.000000 1.000000
|
||||
Kd 0.800000 0.800000 0.800000
|
||||
Ks 0.500000 0.500000 0.500000
|
||||
Ke 0.000000 0.000000 0.000000
|
||||
Ni 1.450000
|
||||
d 1.000000
|
||||
illum 2
|
||||
map_Kd grass_block.png
|
||||
40
Python/MinecraftPy/assets/block.obj
Normal file
@@ -0,0 +1,40 @@
|
||||
# Blender v2.81 (sub 16) OBJ File: ''
|
||||
# www.blender.org
|
||||
mtllib block.mtl
|
||||
o Cube
|
||||
v -1.000000 -1.000000 1.000000
|
||||
v -1.000000 1.000000 1.000000
|
||||
v -1.000000 -1.000000 -1.000000
|
||||
v -1.000000 1.000000 -1.000000
|
||||
v 1.000000 -1.000000 1.000000
|
||||
v 1.000000 1.000000 1.000000
|
||||
v 1.000000 -1.000000 -1.000000
|
||||
v 1.000000 1.000000 -1.000000
|
||||
vt 0.375000 0.000000
|
||||
vt 0.625000 0.000000
|
||||
vt 0.625000 0.250000
|
||||
vt 0.375000 0.250000
|
||||
vt 0.625000 0.500000
|
||||
vt 0.375000 0.500000
|
||||
vt 0.625000 0.750000
|
||||
vt 0.375000 0.750000
|
||||
vt 0.625000 1.000000
|
||||
vt 0.375000 1.000000
|
||||
vt 0.125000 0.500000
|
||||
vt 0.125000 0.750000
|
||||
vt 0.875000 0.500000
|
||||
vt 0.875000 0.750000
|
||||
vn -1.0000 0.0000 0.0000
|
||||
vn 0.0000 0.0000 -1.0000
|
||||
vn 1.0000 0.0000 0.0000
|
||||
vn 0.0000 0.0000 1.0000
|
||||
vn 0.0000 -1.0000 0.0000
|
||||
vn 0.0000 1.0000 0.0000
|
||||
usemtl Stone
|
||||
s off
|
||||
f 1/1/1 2/2/1 4/3/1 3/4/1
|
||||
f 3/4/2 4/3/2 8/5/2 7/6/2
|
||||
f 7/6/3 8/5/3 6/7/3 5/8/3
|
||||
f 5/8/4 6/7/4 2/9/4 1/10/4
|
||||
f 3/11/5 7/6/5 5/8/5 1/12/5
|
||||
f 8/5/6 4/13/6 2/14/6 6/7/6
|
||||
BIN
Python/MinecraftPy/assets/block_textures.psd
Normal file
BIN
Python/MinecraftPy/assets/brick_block.png
Normal file
|
After Width: | Height: | Size: 7.9 KiB |
BIN
Python/MinecraftPy/assets/dirt_block.png
Normal file
|
After Width: | Height: | Size: 21 KiB |
BIN
Python/MinecraftPy/assets/grass_block.png
Normal file
|
After Width: | Height: | Size: 30 KiB |
83
Python/MinecraftPy/assets/punch.xmp
Normal file
@@ -0,0 +1,83 @@
|
||||
<?xpacket begin="" id="W5M0MpCehiHzreSzNTczkc9d"?>
|
||||
<x:xmpmeta xmlns:x="adobe:ns:meta/" x:xmptk="Adobe XMP Core 6.0-c003 79.164527, 2020/10/15-17:48:32 ">
|
||||
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
|
||||
<rdf:Description rdf:about=""
|
||||
xmlns:xmpDM="http://ns.adobe.com/xmp/1.0/DynamicMedia/"
|
||||
xmlns:xmp="http://ns.adobe.com/xap/1.0/"
|
||||
xmlns:xmpMM="http://ns.adobe.com/xap/1.0/mm/"
|
||||
xmlns:stEvt="http://ns.adobe.com/xap/1.0/sType/ResourceEvent#"
|
||||
xmlns:stRef="http://ns.adobe.com/xap/1.0/sType/ResourceRef#"
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/">
|
||||
<xmpDM:Tracks>
|
||||
<rdf:Bag>
|
||||
<rdf:li rdf:parseType="Resource">
|
||||
<xmpDM:trackName>CuePoint Markers</xmpDM:trackName>
|
||||
<xmpDM:trackType>Cue</xmpDM:trackType>
|
||||
<xmpDM:frameRate>f44100</xmpDM:frameRate>
|
||||
</rdf:li>
|
||||
<rdf:li rdf:parseType="Resource">
|
||||
<xmpDM:trackName>CD Track Markers</xmpDM:trackName>
|
||||
<xmpDM:trackType>Track</xmpDM:trackType>
|
||||
<xmpDM:frameRate>f44100</xmpDM:frameRate>
|
||||
</rdf:li>
|
||||
<rdf:li rdf:parseType="Resource">
|
||||
<xmpDM:trackName>Subclip Markers</xmpDM:trackName>
|
||||
<xmpDM:trackType>InOut</xmpDM:trackType>
|
||||
<xmpDM:frameRate>f44100</xmpDM:frameRate>
|
||||
</rdf:li>
|
||||
</rdf:Bag>
|
||||
</xmpDM:Tracks>
|
||||
<xmp:MetadataDate>2020-11-30T21:02:06Z</xmp:MetadataDate>
|
||||
<xmp:ModifyDate>2020-11-30T21:02:06Z</xmp:ModifyDate>
|
||||
<xmpMM:InstanceID>xmp.iid:d014fa67-e7e1-154a-b2d4-79210a0240b4</xmpMM:InstanceID>
|
||||
<xmpMM:DocumentID>xmp.did:d014fa67-e7e1-154a-b2d4-79210a0240b4</xmpMM:DocumentID>
|
||||
<xmpMM:OriginalDocumentID>xmp.did:2825da43-b730-0a47-9090-961c6d284b13</xmpMM:OriginalDocumentID>
|
||||
<xmpMM:History>
|
||||
<rdf:Seq>
|
||||
<rdf:li rdf:parseType="Resource">
|
||||
<stEvt:action>saved</stEvt:action>
|
||||
<stEvt:instanceID>xmp.iid:2825da43-b730-0a47-9090-961c6d284b13</stEvt:instanceID>
|
||||
<stEvt:when>2020-11-30T21:02:06Z</stEvt:when>
|
||||
<stEvt:softwareAgent>Adobe Audition 13.0 (Windows)</stEvt:softwareAgent>
|
||||
<stEvt:changed>/metadata</stEvt:changed>
|
||||
</rdf:li>
|
||||
<rdf:li rdf:parseType="Resource">
|
||||
<stEvt:action>saved</stEvt:action>
|
||||
<stEvt:instanceID>xmp.iid:d014fa67-e7e1-154a-b2d4-79210a0240b4</stEvt:instanceID>
|
||||
<stEvt:when>2020-11-30T21:02:06Z</stEvt:when>
|
||||
<stEvt:softwareAgent>Adobe Audition 13.0 (Windows)</stEvt:softwareAgent>
|
||||
<stEvt:changed>/</stEvt:changed>
|
||||
</rdf:li>
|
||||
</rdf:Seq>
|
||||
</xmpMM:History>
|
||||
<xmpMM:DerivedFrom rdf:parseType="Resource">
|
||||
<stRef:instanceID>xmp.iid:2825da43-b730-0a47-9090-961c6d284b13</stRef:instanceID>
|
||||
<stRef:documentID>xmp.did:2825da43-b730-0a47-9090-961c6d284b13</stRef:documentID>
|
||||
<stRef:originalDocumentID>xmp.did:2825da43-b730-0a47-9090-961c6d284b13</stRef:originalDocumentID>
|
||||
</xmpMM:DerivedFrom>
|
||||
<dc:format>audio/ogg; codec="vorbis"</dc:format>
|
||||
</rdf:Description>
|
||||
</rdf:RDF>
|
||||
</x:xmpmeta>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<?xpacket end="w"?>
|
||||
BIN
Python/MinecraftPy/assets/punch_sound.wav
Normal file
BIN
Python/MinecraftPy/assets/skybox.png
Normal file
|
After Width: | Height: | Size: 986 KiB |
BIN
Python/MinecraftPy/assets/stone_block.png
Normal file
|
After Width: | Height: | Size: 18 KiB |
92
Python/MinecraftPy/base.py
Normal file
@@ -0,0 +1,92 @@
|
||||
from ursina import *
from ursina.prefabs.first_person_controller import FirstPersonController

# Create the engine/window before any entities are built.
app = Ursina()
window.fps_counter.enabled = False
window.exit_button.visible = False

# Textures are loaded once at startup and shared by every entity that uses them.
grass_texture = load_texture('assets/grass_block.png')
stone_texture = load_texture('assets/stone_block.png')
brick_texture = load_texture('assets/brick_block.png')
dirt_texture = load_texture('assets/dirt_block.png')
sky_texture = load_texture('assets/skybox.png')
arm_texture = load_texture('assets/arm_texture.png')

# Currently selected block type: 1=grass, 2=stone, 3=brick, 4=dirt (see update()).
block_pick = 1

# Played on both placing and destroying a block (see Voxel.input).
punch_sound = Audio('assets/punch_sound', loop = False, autoplay = False, volume = 0.2)
|
||||
|
||||
def update():
    """Per-frame hook called automatically by Ursina.

    Animates the hand while a mouse button is held and reads the
    number keys 1-4 to select the block type to place.
    """
    global block_pick

    # Pull the arm in while either mouse button is held, relax it otherwise.
    punching = held_keys['left mouse'] or held_keys['right mouse']
    if punching:
        hand.active()
    else:
        hand.passive()

    # Hotbar selection: last pressed number key wins, same as the original if-chain.
    for key, pick in (('1', 1), ('2', 2), ('3', 3), ('4', 4)):
        if held_keys[key]:
            block_pick = pick
|
||||
|
||||
class Voxel(Button):
    """One placeable/destroyable block in the world.

    Rendered as a clickable Button so hover + mouse input work out of the box.
    """

    def __init__(self, position = (0, 0, 0), texture = grass_texture):
        super().__init__(
            parent = scene,
            position = position,
            model = 'assets/block',
            origin_y = 0.5,
            texture = texture,
            # Slight random brightness so adjacent blocks don't look flat.
            color = color.color(0, 0, random.uniform(0.9, 1)),
            scale = 0.5
        )

    def input(self, key):
        # Only the voxel under the crosshair reacts to clicks.
        if not self.hovered:
            return

        if key == 'left mouse down':
            punch_sound.play()
            # Place a new voxel of the selected type on the clicked face.
            texture_for_pick = {
                1: grass_texture,
                2: stone_texture,
                3: brick_texture,
                4: dirt_texture,
            }.get(block_pick)
            if texture_for_pick is not None:
                Voxel(position = self.position + mouse.normal, texture = texture_for_pick)
        elif key == 'right mouse down':
            punch_sound.play()
            destroy(self)
|
||||
|
||||
class Sky(Entity):
    """Large textured sphere wrapped around the whole scene as a skybox."""

    def __init__(self):
        sky_settings = dict(
            parent = scene,
            model = 'sphere',
            texture = sky_texture,
            scale = 150,
            # Render inner faces too, since the camera sits inside the sphere.
            double_sided = True,
        )
        super().__init__(**sky_settings)
|
||||
|
||||
class Hand(Entity):
    """First-person arm model pinned to the UI camera so it is always in view."""

    def __init__(self):
        super().__init__(
            parent = camera.ui,
            model = 'assets/arm',
            texture = arm_texture,
            scale = 0.2,
            rotation = Vec3(150, -10, 0),
            position = Vec2(0.6, -0.6)
        )

    def active(self):
        """Pulled-in punching pose (while a mouse button is held)."""
        self.position = Vec2(0.5, -0.5)

    def passive(self):
        """Resting pose (matches the position set in __init__)."""
        self.position = Vec2(0.6, -0.6)
|
||||
|
||||
# Build a flat 20x20 platform of grass voxels at y=0 as the starting world.
for z in range(20):
    for x in range(20):
        Voxel(position = (x, 0, z))

# Player controller, skybox and UI arm must exist before the game loop starts;
# update() references `hand` by this module-level name.
player = FirstPersonController()
sky = Sky()
hand = Hand()

app.run()
|
||||
3
Python/Real-Time Face Mask Detection OpenCV Python/.idea/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
# Default ignored files
|
||||
/shelf/
|
||||
/workspace.xml
|
||||
10
Python/Real-Time Face Mask Detection OpenCV Python/.idea/Face-Mask-Detection-master.iml
generated
Normal file
@@ -0,0 +1,10 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<module type="PYTHON_MODULE" version="4">
|
||||
<component name="NewModuleRootManager">
|
||||
<content url="file://$MODULE_DIR$">
|
||||
<excludeFolder url="file://$MODULE_DIR$/venv" />
|
||||
</content>
|
||||
<orderEntry type="inheritedJdk" />
|
||||
<orderEntry type="sourceFolder" forTests="false" />
|
||||
</component>
|
||||
</module>
|
||||
25
Python/Real-Time Face Mask Detection OpenCV Python/.idea/inspectionProfiles/Project_Default.xml
generated
Normal file
@@ -0,0 +1,25 @@
|
||||
<component name="InspectionProjectProfileManager">
|
||||
<profile version="1.0">
|
||||
<option name="myName" value="Project Default" />
|
||||
<inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
|
||||
<option name="ignoredPackages">
|
||||
<value>
|
||||
<list size="3">
|
||||
<item index="0" class="java.lang.String" itemvalue="dlib" />
|
||||
<item index="1" class="java.lang.String" itemvalue="opencv-python" />
|
||||
<item index="2" class="java.lang.String" itemvalue="numpy" />
|
||||
</list>
|
||||
</value>
|
||||
</option>
|
||||
</inspection_tool>
|
||||
<inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true">
|
||||
<option name="ignoredIdentifiers">
|
||||
<list>
|
||||
<option value="Tkconstants" />
|
||||
<option value="tkFileDialog" />
|
||||
<option value="dlib" />
|
||||
</list>
|
||||
</option>
|
||||
</inspection_tool>
|
||||
</profile>
|
||||
</component>
|
||||
@@ -0,0 +1,6 @@
|
||||
<component name="InspectionProjectProfileManager">
|
||||
<settings>
|
||||
<option name="USE_PROJECT_PROFILE" value="false" />
|
||||
<version value="1.0" />
|
||||
</settings>
|
||||
</component>
|
||||
4
Python/Real-Time Face Mask Detection OpenCV Python/.idea/misc.xml
generated
Normal file
@@ -0,0 +1,4 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (Face-Mask-Detection-master)" project-jdk-type="Python SDK" />
|
||||
</project>
|
||||
8
Python/Real-Time Face Mask Detection OpenCV Python/.idea/modules.xml
generated
Normal file
@@ -0,0 +1,8 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="ProjectModuleManager">
|
||||
<modules>
|
||||
<module fileurl="file://$PROJECT_DIR$/.idea/Face-Mask-Detection-master.iml" filepath="$PROJECT_DIR$/.idea/Face-Mask-Detection-master.iml" />
|
||||
</modules>
|
||||
</component>
|
||||
</project>
|
||||
128
Python/Real-Time Face Mask Detection OpenCV Python/main.py
Normal file
@@ -0,0 +1,128 @@
|
||||
# import the necessary packages
|
||||
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
|
||||
from tensorflow.keras.preprocessing.image import img_to_array
|
||||
from tensorflow.keras.models import load_model
|
||||
from imutils.video import VideoStream
|
||||
import numpy as np
|
||||
import imutils
|
||||
import time
|
||||
import cv2
|
||||
import os
|
||||
|
||||
def detect_and_predict_mask(frame, faceNet, maskNet):
    """Detect faces in *frame* and classify each one as mask / no-mask.

    Parameters
    ----------
    frame : numpy.ndarray
        BGR image as read from OpenCV (H x W x 3).
    faceNet : cv2.dnn_Net
        Pre-trained SSD face detector.
    maskNet
        Keras classifier expecting 224x224 RGB, MobileNetV2-preprocessed input.

    Returns
    -------
    tuple
        (locs, preds) where locs is a list of (startX, startY, endX, endY)
        pixel boxes and preds the matching (mask, withoutMask) scores,
        in the same order. Both are empty when no face clears the threshold.
    """
    # Grab the frame dimensions and build a blob with the detector's
    # mean-subtraction values (104, 177, 123).
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224),
                                 (104.0, 177.0, 123.0))

    # Pass the blob through the network and obtain the face detections.
    faceNet.setInput(blob)
    detections = faceNet.forward()

    # Faces (preprocessed ROIs), their locations, and the mask predictions.
    faces = []
    locs = []
    preds = []

    # Loop over the detections.
    for i in range(0, detections.shape[2]):
        # Confidence (i.e. probability) associated with this detection.
        confidence = detections[0, 0, i, 2]

        # Filter out weak detections.
        if confidence > 0.5:
            # Bounding box in pixel coordinates, clamped to the frame.
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

            # Extract the face ROI; skip degenerate boxes that clip to an
            # empty slice (cv2.cvtColor would raise on a zero-size image).
            face = frame[startY:endY, startX:endX]
            if face.size == 0:
                continue

            # Convert BGR->RGB, resize to 224x224 and preprocess exactly
            # as MobileNetV2 expects.
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face)
            face = preprocess_input(face)

            faces.append(face)
            locs.append((startX, startY, endX, endY))

    # Only make predictions if at least one face was detected; batch all
    # faces in one call rather than predicting one-by-one.
    if len(faces) > 0:
        faces = np.array(faces, dtype="float32")
        preds = maskNet.predict(faces, batch_size=32)

    # Return the face locations and their corresponding predictions.
    return (locs, preds)
|
||||
|
||||
# load our serialized face detector model from disk
# NOTE(review): Windows-style relative paths; adjust separators on other OSes.
prototxtPath = r"face_detector\deploy.prototxt"
weightsPath = r"face_detector\res10_300x300_ssd_iter_140000.caffemodel"
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)

# load the face mask detector model from disk
maskNet = load_model("mask_detector.model")

# initialize the video stream (camera index 0; VideoStream reads on a thread)
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()

# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=400)

    # detect faces in the frame and determine if they are wearing a
    # face mask or not
    (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)

    # loop over the detected face locations and their corresponding
    # predictions
    for (box, pred) in zip(locs, preds):
        # unpack the bounding box and predictions
        (startX, startY, endX, endY) = box
        (mask, withoutMask) = pred

        # determine the class label and color we'll use to draw
        # the bounding box and text (green = mask, red = no mask, BGR order)
        label = "Mask" if mask > withoutMask else "No Mask"
        color = (0, 255, 0) if label == "Mask" else (0, 0, 255)

        # include the probability in the label
        label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)

        # display the label and bounding box rectangle on the output
        # frame
        cv2.putText(frame, label, (startX, startY - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)

    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
|
||||
@@ -0,0 +1,7 @@
|
||||
tensorflow>=1.15.2
|
||||
keras==2.3.1
|
||||
imutils==0.5.3
|
||||
numpy==1.18.2
|
||||
opencv-python==4.2.0.*
|
||||
matplotlib==3.2.1
|
||||
scipy==1.4.1
|
||||
@@ -0,0 +1,137 @@
|
||||
# Train a MobileNetV2-based mask / no-mask classifier and save it to disk.
# import the necessary packages
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
# NOTE(review): `paths` is imported but never used below.
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import os

# initialize the initial learning rate, number of epochs to train for,
# and batch size
INIT_LR = 1e-4
EPOCHS = 20
BS = 32

# Dataset root: expects one sub-folder per entry of CATEGORIES.
DIRECTORY = r"C:\Mask Detection\CODE\Face-Mask-Detection-master\dataset"
CATEGORIES = ["with_mask", "without_mask"]

# grab the list of images in our dataset directory, then initialize
# the list of data (i.e., images) and class images
print("[INFO] loading images...")

data = []
labels = []

# Load every image, resize to the 224x224 MobileNetV2 input size and
# apply the matching preprocessing.
for category in CATEGORIES:
    path = os.path.join(DIRECTORY, category)
    for img in os.listdir(path):
        img_path = os.path.join(path, img)
        image = load_img(img_path, target_size=(224, 224))
        image = img_to_array(image)
        image = preprocess_input(image)

        data.append(image)
        labels.append(category)

# perform one-hot encoding on the labels
# (LabelBinarizer yields a single 0/1 column for two classes;
# to_categorical expands it to two one-hot columns)
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)

data = np.array(data, dtype="float32")
labels = np.array(labels)

# Stratified 80/20 train/test split, fixed seed for reproducibility.
(trainX, testX, trainY, testY) = train_test_split(data, labels,
    test_size=0.20, stratify=labels, random_state=42)

# construct the training image generator for data augmentation
aug = ImageDataGenerator(
    rotation_range=20,
    zoom_range=0.15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode="nearest")

# load the MobileNetV2 network, ensuring the head FC layer sets are
# left off
baseModel = MobileNetV2(weights="imagenet", include_top=False,
    input_tensor=Input(shape=(224, 224, 3)))

# construct the head of the model that will be placed on top of the
# the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(128, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation="softmax")(headModel)

# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)

# loop over all layers in the base model and freeze them so they will
# *not* be updated during the first training process
for layer in baseModel.layers:
    layer.trainable = False

# compile our model
# NOTE(review): `lr`/`decay` are deprecated Adam argument names in newer
# Keras releases (use `learning_rate` / a schedule); kept for the pinned
# versions in requirements.txt.
print("[INFO] compiling model...")
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt,
    metrics=["accuracy"])

# train the head of the network
print("[INFO] training head...")
H = model.fit(
    aug.flow(trainX, trainY, batch_size=BS),
    steps_per_epoch=len(trainX) // BS,
    validation_data=(testX, testY),
    validation_steps=len(testX) // BS,
    epochs=EPOCHS)

# make predictions on the testing set
print("[INFO] evaluating network...")
predIdxs = model.predict(testX, batch_size=BS)

# for each image in the testing set we need to find the index of the
# label with corresponding largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)

# show a nicely formatted classification report
print(classification_report(testY.argmax(axis=1), predIdxs,
    target_names=lb.classes_))

# serialize the model to disk
print("[INFO] saving mask detector model...")
model.save("mask_detector.model", save_format="h5")

# plot the training loss and accuracy
N = EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig("plot.png")
|
||||
73
Python/donut.py
Normal file
@@ -0,0 +1,73 @@
|
||||
import os
from math import sin, cos


def main():
    """Render a spinning ASCII torus ("donut") in the terminal, forever."""
    # a, b: rotation angles of the torus, incremented once per frame.
    a=0
    b=0

    # Terminal size in characters; the interactive prompts are disabled.
    height=24
    width=80
    #height=int(input("Enter Screen Height : "))
    #width=int(input("Enter Screen Width : "))

    # for clearing console (windows and unix systems)
    clear = "cls"
    if os.name == "posix":
        clear = "clear"

    os.system(clear)
    while True:
        # z-buffer and character frame buffer, rebuilt every frame.
        z = [0 for _ in range(4*height*width)]
        screen = [' ' for _ in range(height*width)]

        j=0
        # j sweeps the torus's main circle, i the tube cross-section (0..2*pi).
        while j<6.28:
            j+=0.07
            i=0
            while i<6.28:
                i+=0.02

                sinA=sin(a)
                cosA=cos(a)
                cosB=cos(b)
                sinB=sin(b)

                sini=sin(i)
                cosi=cos(i)
                cosj=cos(j)
                sinj=sin(j)

                # 2 is the distance of the tube centre from the torus axis.
                cosj2=cosj+2
                # mess is 1/z: the perspective factor (larger = nearer to the eye).
                mess=1/(sini*cosj2*sinA+sinj*cosA+5)
                t=sini*cosj2*cosA-sinj* sinA

                # 40 is the left screen shift
                x = int(40+30*mess*(cosi*cosj2*cosB-t*sinB))
                # 11 is the down screen shift
                y = int(11+15*mess*(cosi*cosj2*sinB +t*cosB))
                # all are casted to int, ie floored
                o = int(x+width*y)
                # multiplying by 8 to bring in range 0-11 as 8*(sqrt(2))=11
                # because we have 11 luminance characters
                N = int(8*((sinj*sinA-sini*cosj*cosA)*cosB-sini*cosj*sinA-sinj*cosA-cosi *cosj*sinB))
                # if x,y inside screen and previous z-buffer is < mess
                # i.e. when z[o] is 0 or the prev point is behind the new point
                # so we change it to the point nearer to the eye/ above prev point
                if 0<y<height and 0<x<width and z[o] < mess:
                    z[o]=mess
                    screen[o]=".,-~:;=!*#$@"[N if N>0 else 0]

        # prints (column 0 doubles as the row break, matching the classic donut.c)
        os.system(clear)
        for index, char in enumerate(screen):
            if index % width == 0:
                print()
            else:
                print(char, end='')

        # increments
        a+=0.04
        b+=0.02

if __name__ == "__main__":
    main()
|
||||
33
Python/findPrimes.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import time
import threading

# Search every number in [start_number, end_number) for primality.
start_number = 1
end_number = 50000

# Wall-clock start time, reported at the end of the script.
start = time.time()
|
||||
|
||||
# `threads` is only used by the commented-out threaded variant further down.
threads = []
# Primes found so far, appended in ascending order of the candidates tried.
primes = []

def calculatePrime(candidate_number):
    """Append candidate_number to the global `primes` list if it is prime.

    Trial division only needs to test divisors up to sqrt(n): any composite
    number has a factor no larger than its square root. This replaces the
    original O(n) scan with O(sqrt(n)) work per candidate, and also rejects
    numbers below 2 (the original wrongly counted 1 as prime because
    range(2, 1) is empty).
    """
    if candidate_number < 2:
        return
    # int(n ** 0.5) is exact for this script's range (n < 2**52).
    for div_number in range(2, int(candidate_number ** 0.5) + 1):
        if candidate_number % div_number == 0:
            return
    primes.append(candidate_number)
|
||||
|
||||
# Sequential search. The threaded variant below is kept for reference but
# disabled: one thread per candidate adds overhead without parallel speedup
# for this CPU-bound work.
for candidate_number in range(start_number, end_number, 1):
    calculatePrime(candidate_number)
    #thread = threading.Thread(target=calculatePrime, args=(candidate_number,))
    #threads.append(thread)
    #thread.start()

#threads[threads.__len__() - 1].join()

# Elapsed wall-clock time in seconds, rounded to 2 decimals.
end = round(time.time() - start, 2)

print('Find all primes up to: ' + str(end_number))
print('Time elasped: ' + str(end) + ' seconds')
print('Number of primes found ' + str(primes.__len__()))
|
||||
1
Python/helloWorld.py
Normal file
@@ -0,0 +1 @@
|
||||
# Minimal hello-world script (German greeting, kept verbatim).
print("Hallo LerneProgrammieren")
|
||||
38
Python/numberguessing.py
Normal file
@@ -0,0 +1,38 @@
|
||||
# Train and evaluate a small dense network on MNIST handwritten digits.
# Requirements
import tensorflow as tf
#tf.enable_eager_execution()

# MNIST: 28x28 grayscale digits; pixel values rescaled from [0, 255] to [0, 1].
mnist = tf.keras.datasets.mnist
(training_data, training_labels), (test_data, test_labels) = mnist.load_data()
training_data, test_data = training_data / 255, test_data / 255

import numpy as np



# Neuronal Network: flatten -> 128 ReLU units -> 10-way softmax over digits.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])

# sparse_categorical_crossentropy matches the integer (non-one-hot) labels.
model.compile(
    optimizer=tf.optimizers.Adam(),
    loss='sparse_categorical_crossentropy',
    #metrics={'accuracy'}
)



# Train Network
model.fit(training_data, training_labels, epochs=5)



# Test Network
model.evaluate(test_data, test_labels)
predictions = model.predict(test_data)

# Show the true label vs. the argmax prediction for one sample image.
image_index = 2
print('True: {} \nPredict: {}'.format(test_labels[image_index], np.argmax(predictions[image_index])))
|
||||